micro_sam.sam_annotator.training_ui

import os
import warnings

from qtpy import QtWidgets
# from napari.qt.threading import thread_worker

import torch
from torch.utils.data import random_split

import torch_em

import micro_sam.util as util
from ._tooltips import get_tooltip
import micro_sam.sam_annotator._widgets as widgets
from micro_sam.training import default_sam_dataset, train_sam_for_configuration, CONFIGURATIONS


def _find_best_configuration():
    if torch.cuda.is_available():

        # Check how much memory we have and select the configuration
        # that best matches the available VRAM.
        _, vram = torch.cuda.mem_get_info()  # returns (free, total) memory in bytes
        vram = vram / 1e9  # in GB

        # Maybe we can get more configurations in the future.
        if vram > 80:  # More than 80 GB: use the A100 configuration.
            return "A100"
        elif vram > 30:  # More than 30 GB: use the V100 configuration.
            return "V100"
        elif vram > 14:  # More than 14 GB: use the RTX5000 configuration.
            return "rtx5000"
        else:  # Otherwise: not enough memory to train on the GPU, use the CPU instead.
            return "CPU"
    else:
        return "CPU"
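
# Example (hypothetical values): a GPU with 40 GB of VRAM reports a total of
# roughly 42.9e9 bytes, so vram is about 42.9 and "V100" is returned.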


class TrainingWidget(widgets._WidgetBase):
    def __init__(self, parent=None):
        super().__init__(parent=parent)

        # Create the UI: the general options.
        self._create_options()

        # Add the settings (collapsible).
        self.layout().addWidget(self._create_settings())

        # Add the run button to trigger the training.
        self.run_button = QtWidgets.QPushButton("Start Training")
        self.run_button.clicked.connect(self.__call__)
        self.layout().addWidget(self.run_button)

    def _create_options(self):
        self.raw_path = None
        _, layout = self._add_path_param(
            "Path to images", self.raw_path, "both", placeholder="/path/to/images",
            tooltip=get_tooltip("training", "raw_path")
        )
        self.layout().addLayout(layout)

        self.raw_key = None
        _, layout = self._add_string_param(
            "Image data key", self.raw_key, placeholder="e.g. \"*.tif\"",
            tooltip=get_tooltip("training", "raw_key")
        )
        self.layout().addLayout(layout)

        self.label_path = None
        _, layout = self._add_path_param(
            "Path to labels", self.label_path, "both", placeholder="/path/to/labels",
            tooltip=get_tooltip("training", "label_path")
        )
        self.layout().addLayout(layout)

        self.label_key = None
        _, layout = self._add_string_param(
            "Label data key", self.label_key, placeholder="e.g. \"*.tif\"",
            tooltip=get_tooltip("training", "label_key")
        )
        self.layout().addLayout(layout)

        self.configuration = _find_best_configuration()
        self.setting_dropdown, layout = self._add_choice_param(
            "Configuration", self.configuration, list(CONFIGURATIONS.keys()),
            tooltip=get_tooltip("training", "configuration")
        )
        self.layout().addLayout(layout)

        self.with_segmentation_decoder = True
        self.layout().addWidget(self._add_boolean_param(
            "With segmentation decoder", self.with_segmentation_decoder,
            tooltip=get_tooltip("training", "segmentation_decoder")
        ))

    def _create_settings(self):
        setting_values = QtWidgets.QWidget()
        setting_values.setLayout(QtWidgets.QVBoxLayout())

        # TODO use CPU instead of MPS on Mac because training with MPS is slower!
        # Device and patch shape settings.
        self.device = "auto"
        device_options = ["auto"] + util._available_devices()
        self.device_dropdown, layout = self._add_choice_param(
            "Device", self.device, device_options, tooltip=get_tooltip("training", "device")
        )
        setting_values.layout().addLayout(layout)

        self.patch_x, self.patch_y = 512, 512
        self.patch_x_param, self.patch_y_param, layout = self._add_shape_param(
            ("Patch size x", "Patch size y"), (self.patch_x, self.patch_y), min_val=0, max_val=2048,
            tooltip=get_tooltip("training", "patch")
        )
        setting_values.layout().addLayout(layout)

        # Paths for validation data.
        self.raw_path_val = None
        _, layout = self._add_path_param(
            "Path to validation images", self.raw_path_val, "both", placeholder="/path/to/images",
            tooltip=get_tooltip("training", "raw_path_val")
        )
        setting_values.layout().addLayout(layout)

        self.label_path_val = None
        _, layout = self._add_path_param(
            "Path to validation labels", self.label_path_val, "both", placeholder="/path/to/labels",
            tooltip=get_tooltip("training", "label_path_val")
        )
        setting_values.layout().addLayout(layout)

        # Name of the model to be trained and options to override the initial model
        # on top of which the finetuning is run.
        self.name = "sam_model"
        self.name_param, layout = self._add_string_param(
            "Model name", self.name, tooltip=get_tooltip("training", "name")
        )
        setting_values.layout().addLayout(layout)

        self.initial_model = None
        self.initial_model_param, layout = self._add_string_param(
            "Initial model", self.initial_model, tooltip=get_tooltip("training", "initial_model")
        )
        setting_values.layout().addLayout(layout)

        self.checkpoint = None
        self.checkpoint_param, layout = self._add_string_param(
            "Checkpoint", self.checkpoint, tooltip=get_tooltip("training", "checkpoint")
        )
        setting_values.layout().addLayout(layout)

        self.output_path = None
        self.output_path_param, layout = self._add_string_param(
            "Output Path", self.output_path, tooltip=get_tooltip("training", "output_path")
        )
        setting_values.layout().addLayout(layout)

        self.n_epochs = 100
        self.n_epochs_param, layout = self._add_int_param(
            "Number of epochs", self.n_epochs, tooltip=get_tooltip("training", "n_epochs"),
            min_val=1, max_val=1000,
        )
        setting_values.layout().addLayout(layout)

        settings = widgets._make_collapsible(setting_values, title="Advanced Settings")
        return settings

    def _get_loaders(self):
        batch_size = 1
        num_workers = 1 if str(self.device) == "cpu" else 4

        patch_shape = (self.patch_x, self.patch_y)
        dataset = default_sam_dataset(
            str(self.raw_path), self.raw_key, str(self.label_path), self.label_key,
            patch_shape=patch_shape, with_segmentation_decoder=self.with_segmentation_decoder,
        )

        raw_path_val, label_path_val = self.raw_path_val, self.label_path_val
        if raw_path_val is None:
            # Use 10% of the dataset - but at least one image - for validation.
            n_val = max(1, int(0.1 * len(dataset)))
            train_dataset, val_dataset = random_split(dataset, lengths=[len(dataset) - n_val, n_val])
        else:
            train_dataset = dataset
            val_dataset = default_sam_dataset(
                str(raw_path_val), self.raw_key, str(label_path_val), self.label_key,
                patch_shape=patch_shape, with_segmentation_decoder=self.with_segmentation_decoder,
            )

        train_loader = torch_em.segmentation.get_data_loader(
            train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
        )
        val_loader = torch_em.segmentation.get_data_loader(
            val_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
        )
        return train_loader, val_loader
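
    # Both loaders yield (image, label) batches; with the settings above each
    # batch is a pair of tensors of shape (1, channels, patch_y, patch_x).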

    def _get_model_type(self):
        # Determine the model type, either from the initial model name (if given)
        # or from the default model type of the chosen configuration.
        if self.initial_model is None or self.initial_model in ("None", ""):
            model_type = CONFIGURATIONS[self.configuration]["model_type"]
        else:
            model_type = self.initial_model[:5]
            if model_type != CONFIGURATIONS[self.configuration]["model_type"]:
                warnings.warn(
                    f"You have changed the model type for your chosen configuration {self.configuration} "
                    f"from {CONFIGURATIONS[self.configuration]['model_type']} to {model_type}. "
                    "The training may be very slow or not work at all."
                )
        assert model_type is not None
        return model_type
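
    # Example: for initial_model = "vit_b_lm" the first five characters give
    # model_type = "vit_b", the type of the underlying image encoder.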

    # Make sure that the raw and label paths have been set.
    # If they haven't, show an error message.
    # (We could do a more extensive validation here, but for now we keep it minimal.)
    def _validate_inputs(self):
        missing_raw = self.raw_path is None or not os.path.exists(self.raw_path)
        missing_label = self.label_path is None or not os.path.exists(self.label_path)
        if missing_raw or missing_label:
            msg = ""
            if missing_raw:
                msg += "The path to raw data is missing or does not exist. "
            if missing_label:
                msg += "The path to label data is missing or does not exist."
            return widgets._generate_message("error", msg)
        return False
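
    # Note the return contract: False if the inputs are valid, otherwise the
    # truthy result of widgets._generate_message, on which __call__ aborts.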

    def __call__(self, skip_validate=False):
        if not skip_validate and self._validate_inputs():
            return

        # Set up progress bar and signals for using it within a threadworker.
        pbar, pbar_signals = widgets._create_pbar_for_threadworker()

        model_type = self._get_model_type()
        if self.checkpoint is None:
            model_registry = util.models()
            checkpoint_path = model_registry.fetch(model_type)
        else:
            checkpoint_path = self.checkpoint

        # @thread_worker()
        def run_training():
            train_loader, val_loader = self._get_loaders()
            train_sam_for_configuration(
                name=self.name, configuration=self.configuration,
                train_loader=train_loader, val_loader=val_loader,
                checkpoint_path=checkpoint_path,
                with_segmentation_decoder=self.with_segmentation_decoder,
                model_type=model_type, device=self.device,
                n_epochs=self.n_epochs, pbar_signals=pbar_signals,
            )

            # The best checkpoint after training.
            export_checkpoint = os.path.join("checkpoints", self.name, "best.pt")
            assert os.path.exists(export_checkpoint), export_checkpoint
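
            # With the default model name this resolves to
            # "checkpoints/sam_model/best.pt", relative to the current working directory.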

            # Export the model if an output path was given.
            if self.output_path:

                # If the output path has a PyTorch-specific extension then
                # we just export the checkpoint.
                if os.path.splitext(self.output_path)[1] in (".pt", ".pth"):
                    util.export_custom_sam_model(
                        checkpoint_path=export_checkpoint, model_type=model_type, save_path=self.output_path,
                    )

                # Otherwise we export it as a bioimage.io model.
                else:
                    from micro_sam.bioimageio import export_sam_model

                    # Load an image and label image from the val loader.
                    with torch.no_grad():
                        image, label_image = next(iter(val_loader))
                        image, label_image = image.cpu().numpy().squeeze(), label_image.cpu().numpy().squeeze()

                    # Select the first channel of the label image if we have a channel axis.
                    # (This contains the labels.)
                    if label_image.ndim == 3:
                        label_image = label_image[0]
                    assert image.shape == label_image.shape
                    label_image = label_image.astype("uint32")

                    export_sam_model(
                        image=image,
                        label_image=label_image,
                        model_type=model_type,
                        name=self.name,
                        output_path=self.output_path,
                        checkpoint_path=export_checkpoint,
                    )

                pbar_signals.pbar_stop.emit()
                return self.output_path

            else:
                pbar_signals.pbar_stop.emit()
                return export_checkpoint

        path = run_training()
        print(f"Training has finished. The trained model is saved at {path}.")
        # worker = run_training()
        # worker.returned.connect(lambda path: print(f"Training has finished. The trained model is saved at {path}."))
        # worker.start()
        # return worker
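
The widget is meant to be used as a napari dock widget. Below is a minimal,
illustrative sketch of embedding it in a viewer; the napari calls are standard
API, the paths are placeholders, and setting the attributes directly (rather
than through the UI fields) is an assumption made for scripting convenience:

    import napari
    from micro_sam.sam_annotator.training_ui import TrainingWidget

    viewer = napari.Viewer()
    widget = TrainingWidget()

    # Placeholder paths; point these at your own data. Setting the attributes
    # directly does not update the displayed text fields, but the training
    # code above reads these attributes.
    widget.raw_path, widget.raw_key = "/path/to/images", "*.tif"
    widget.label_path, widget.label_key = "/path/to/labels", "*.tif"

    viewer.window.add_dock_widget(widget, name="Train SAM")
    napari.run()  # press "Start Training" in the widget to launch training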