@testing-library/react#getAllByText TypeScript Examples

The following examples show how to use getAllByText from @testing-library/react. Each example is taken from an open-source project; the source file, project, and license are noted above it.
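As a quick orientation before the project examples, here is a minimal, self-contained sketch (the markup is made up for illustration): getAllByText returns an array of every element whose text matches, and throws if there are no matches at all.

import React from 'react'
import { render, screen } from '@testing-library/react'

test('getAllByText returns every matching element', () => {
  render(
    <ul>
      <li>apple pie</li>
      <li>apple tart</li>
    </ul>,
  )
  // Both list items match; with zero matches, getAllByText would throw.
  expect(screen.getAllByText(/apple/)).toHaveLength(2)
})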
Example #1
Source File: ExperimentResults.test.tsx    From abacus with GNU General Public License v2.0
test('renders the condensed table with some analyses in non-debug mode for a Conversion Metric', async () => {
  const { container } = render(<ExperimentResults analyses={analyses} experiment={experiment} metrics={metrics} />)

  // In non-debug mode, we shouldn't have a <pre> element with the JSON.
  expect(container.querySelector('.debug-json')).toBeNull()

  // Check the table snapshot before expanding any metric.
  expect(container.querySelector('.analysis-latest-results')).toMatchSnapshot()

  // Clicking on metric_1 or metric_2 should have no effect on anything, but metric_3 should render the details.
  fireEvent.click(getByText(container, /metric_1/))
  fireEvent.click(getAllByText(container, /metric_2/)[0])
  fireEvent.click(getByText(container, /metric_3/))
  await waitFor(() => getAllByText(container, /Last analyzed/), { container })
  expect(container.querySelector('.analysis-latest-results .analysis-detail-panel')).toMatchSnapshot()
  fireEvent.click(screen.getAllByRole('button', { name: /"Observed" data/ })[0])

  expect(mockedPlot).toMatchSnapshot()
})
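Note that these tests use the standalone (unbound) query form that @testing-library/react re-exports from @testing-library/dom: the first argument is an explicit container element to search within, unlike the pre-bound screen queries, which search all of document.body. A minimal sketch of the two forms side by side:

import React from 'react'
import { getAllByText, render, screen } from '@testing-library/react'

test('unbound vs screen-bound getAllByText', () => {
  const { container } = render(<span>metric_2</span>)
  // Unbound form: scoped to the given container element.
  expect(getAllByText(container, /metric_2/)).toHaveLength(1)
  // Pre-bound form: searches the whole document.body.
  expect(screen.getAllByText(/metric_2/)).toHaveLength(1)
})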
Example #2
Source File: ExperimentResults.test.tsx    From abacus with GNU General Public License v2.0
test('renders the condensed table with some analyses in non-debug mode for a Revenue Metric', async () => {
  const metrics = Fixtures.createMetrics().map((metric) => ({
    ...metric,
    parameterType: MetricParameterType.Revenue,
  }))

  const { container } = render(<ExperimentResults analyses={analyses} experiment={experiment} metrics={metrics} />)

  // In non-debug mode, we shouldn't have a <pre> element with the JSON.
  expect(container.querySelector('.debug-json')).toBeNull()

  // Check the table snapshot before expanding any metric.
  expect(container.querySelector('.analysis-latest-results')).toMatchSnapshot()

  // Clicking on metric_1 or metric_2 should have no effect on anything, but metric_3 should render the details.
  fireEvent.click(getByText(container, /metric_1/))
  fireEvent.click(getAllByText(container, /metric_2/)[0])
  fireEvent.click(getByText(container, /metric_3/))
  await waitFor(() => getAllByText(container, /Last analyzed/), { container })
  expect(container.querySelector('.analysis-latest-results .analysis-detail-panel')).toMatchSnapshot()
  fireEvent.click(screen.getAllByRole('button', { name: /"Observed" data/ })[0])

  expect(mockedPlot).toMatchSnapshot()
})
Example #3
Source File: ExperimentResults.test.tsx    From abacus with GNU General Public License v2.0
test('renders correctly for 1 analysis datapoint, not statistically significant', async () => {
  const metricEstimates = {
    variations: {
      '1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 1,
      }),
      '2': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 1,
      }),
    },
    diffs: {
      '2_1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: -1,
        mean: 0,
      }),
      '1_2': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
    },
    ratios: {
      '2_1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 0,
      }),
      '1_2': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
    },
  }
  const { container } = render(
    <ExperimentResults
      analyses={[
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.PpNaive, metricEstimates }),
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.IttPure, metricEstimates }),
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.MittNoCrossovers, metricEstimates }),
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.MittNoSpammers, metricEstimates }),
        Fixtures.createAnalysis({
          analysisStrategy: AnalysisStrategy.MittNoSpammersNoCrossovers,
          metricEstimates,
        }),
      ]}
      experiment={experiment}
      metrics={metrics}
    />,
  )

  // Check the table snapshot before expanding any metric.
  expect(container.querySelector('.analysis-latest-results')).toMatchSnapshot()

  // Clicking on metric_1 or metric_2 should have no effect on anything, but metric_3 should render the details.
  fireEvent.click(getByText(container, /metric_1/))
  fireEvent.click(getAllByText(container, /metric_2/)[0])
  fireEvent.click(getByText(container, /metric_3/))
  await waitFor(() => getByText(container, /Last analyzed/), { container })
  expect(container.querySelector('.analysis-latest-results .analysis-detail-panel')).toMatchSnapshot()

  expect(mockedPlot).toMatchSnapshot()
})
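An aside on the waitFor idiom these tests rely on: waitFor retries its callback until the callback stops throwing (or the timeout elapses), and getBy*/getAllBy* queries throw while nothing matches, so awaiting waitFor(() => getByText(container, /Last analyzed/)) simply waits for that text to appear. The built-in findBy* queries are shorthand for the same combination; a minimal sketch (the rendered markup is made up):

import React from 'react'
import { findByText, getByText, render, waitFor } from '@testing-library/react'

test('waiting for text to appear', async () => {
  const { container } = render(<p>Last analyzed just now</p>)
  // Resolves once getByText stops throwing, i.e. once the text is present.
  await waitFor(() => getByText(container, /Last analyzed/))
  // Equivalent shorthand: findByText = getByText wrapped in waitFor.
  await findByText(container, /Last analyzed/)
})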
Example #4
Source File: ExperimentResults.test.tsx    From abacus with GNU General Public License v2.0
test('renders correctly for 1 analysis datapoint, statistically significant', async () => {
  const metricEstimates = {
    variations: {
      '1': Fixtures.createDistributionStats({
        top_95: 2,
        bottom_95: 1,
        mean: 1,
      }),
      '2': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 1,
      }),
    },
    diffs: {
      '2_1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 0,
      }),
      '1_2': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
    },
    ratios: {
      '2_1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 0,
      }),
      '1_2': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
    },
  }
  const { container } = render(
    <ExperimentResults
      analyses={[
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.PpNaive, metricEstimates }),
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.IttPure, metricEstimates }),
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.MittNoCrossovers, metricEstimates }),
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.MittNoSpammers, metricEstimates }),
        Fixtures.createAnalysis({
          analysisStrategy: AnalysisStrategy.MittNoSpammersNoCrossovers,
          metricEstimates,
        }),
      ]}
      experiment={experiment}
      metrics={metrics}
    />,
  )

  // Check the table snapshot before expanding any metric.
  expect(container.querySelector('.analysis-latest-results')).toMatchSnapshot()

  // Clicking on metric_1 or metric_2 should have no effect on anything, but metric_3 should render the details.
  fireEvent.click(getByText(container, /metric_1/))
  fireEvent.click(getAllByText(container, /metric_2/)[0])
  fireEvent.click(getByText(container, /metric_3/))
  await waitFor(() => getByText(container, /Last analyzed/), { container })
  expect(container.querySelector('.analysis-latest-results .analysis-detail-panel')).toMatchSnapshot()

  expect(mockedPlot).toMatchSnapshot()
})
Example #5
Source File: ExperimentResults.test.tsx    From abacus with GNU General Public License v2.0
test('A/B/n: renders correctly for 1 analysis datapoint, not statistically significant', async () => {
  const experiment = Fixtures.createExperimentFull({
    variations: [
      Fixtures.createVariation({
        variationId: 1,
        name: 'control',
        isDefault: true,
        allocatedPercentage: 40,
      }),
      Fixtures.createVariation({
        variationId: 2,
        name: 'treatment1',
        isDefault: false,
        allocatedPercentage: 40,
      }),
      Fixtures.createVariation({
        variationId: 3,
        name: 'treatment2',
        isDefault: false,
        allocatedPercentage: 20,
      }),
    ],
  })
  const metricEstimates = {
    variations: {
      '1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 1,
      }),
      '2': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 1,
      }),
      '3': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 1,
      }),
    },
    diffs: {
      '2_1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: -1,
        mean: 0,
      }),
      '1_2': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
      '3_1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: -1,
        mean: 0,
      }),
      '1_3': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
      '3_2': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: -1,
        mean: 0,
      }),
      '2_3': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
    },
    ratios: {
      '2_1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 0,
      }),
      '1_2': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
      '3_1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 0,
      }),
      '1_3': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
      '3_2': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 0,
      }),
      '2_3': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
    },
  }
  const { container } = render(
    <ExperimentResults
      analyses={[
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.PpNaive, metricEstimates }),
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.IttPure, metricEstimates }),
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.MittNoCrossovers, metricEstimates }),
        Fixtures.createAnalysis({ analysisStrategy: AnalysisStrategy.MittNoSpammers, metricEstimates }),
        Fixtures.createAnalysis({
          analysisStrategy: AnalysisStrategy.MittNoSpammersNoCrossovers,
          metricEstimates,
        }),
      ]}
      experiment={experiment}
      metrics={metrics}
    />,
  )

  // Check the table snapshot before expanding any metric.
  expect(container.querySelector('.analysis-latest-results')).toMatchSnapshot()

  // Clicking on metric_1 or metric_2 should have no effect on anything, but metric_3 should render the details.
  fireEvent.click(getByText(container, /metric_1/))
  fireEvent.click(getAllByText(container, /metric_2/)[0])
  fireEvent.click(getByText(container, /metric_3/))
  await waitFor(() => getByText(container, /Last analyzed/), { container })
  expect(container.querySelector('.analysis-latest-results .analysis-detail-panel')).toMatchSnapshot()

  expect(mockedPlot).toMatchSnapshot()
})
Example #6
Source File: ExperimentResults.test.tsx    From abacus with GNU General Public License v2.0
test('renders correctly for conflicting analysis data', async () => {
  toggleDebugMode()

  const metricEstimates1 = {
    variations: {
      '1': Fixtures.createDistributionStats({
        top_95: 2,
        bottom_95: 1,
        mean: 1,
      }),
      '2': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 1,
      }),
    },
    diffs: {
      '2_1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 0,
      }),
      '1_2': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
    },
    ratios: {
      '2_1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 0,
      }),
      '1_2': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
    },
  }
  const metricEstimates2 = {
    variations: {
      '1': Fixtures.createDistributionStats({
        top_95: 2,
        bottom_95: 1,
        mean: 1,
      }),
      '2': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 1,
      }),
    },
    diffs: {
      '2_1': Fixtures.createDistributionStats({
        top_95: -1,
        bottom_95: -2,
        mean: -1.4,
      }),
      '1_2': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
    },
    ratios: {
      '2_1': Fixtures.createDistributionStats({
        top_95: 1,
        bottom_95: 0.5,
        mean: 0,
      }),
      '1_2': Fixtures.createDistributionStats({
        top_95: 0,
        bottom_95: 0,
        mean: 0,
      }),
    },
  }

  const { container } = render(
    <ExperimentResults
      analyses={[
        Fixtures.createAnalysis({
          analysisStrategy: AnalysisStrategy.PpNaive,
          metricEstimates: metricEstimates1,
        }),
        Fixtures.createAnalysis({
          analysisStrategy: AnalysisStrategy.IttPure,
          metricEstimates: metricEstimates2,
        }),
        Fixtures.createAnalysis({
          analysisStrategy: AnalysisStrategy.MittNoCrossovers,
          metricEstimates: metricEstimates2,
        }),
        Fixtures.createAnalysis({
          analysisStrategy: AnalysisStrategy.MittNoSpammers,
          metricEstimates: metricEstimates2,
        }),
        Fixtures.createAnalysis({
          analysisStrategy: AnalysisStrategy.MittNoSpammersNoCrossovers,
          metricEstimates: metricEstimates2,
        }),
      ]}
      experiment={experiment}
      metrics={metrics}
    />,
  )

  // Check the table snapshot before expanding any metric.
  expect(container.querySelector('.analysis-latest-results')).toMatchSnapshot()

  // Clicking on metric_1 or metric_2 should have no effect on anything, but metric_3 should render the details.
  fireEvent.click(getByText(container, /metric_1/))
  fireEvent.click(getAllByText(container, /metric_2/)[0])
  fireEvent.click(getByText(container, /metric_3/))
  await waitFor(() => getByText(container, /Last analyzed/), { container })
  expect(container.querySelector('.analysis-latest-results .analysis-detail-panel')).toMatchSnapshot()

  expect(mockedPlot).toMatchSnapshot()

  toggleDebugMode()
})
Example #7
Source File: EnumerationMappingEditor.test.tsx    From legend-studio with Apache License 2.0
test.only(
  integrationTest('Enumeration mapping editor basic functionality'),
  async () => {
    await TEST__openElementFromExplorerTree('demo::MyMap', renderResult);
    const editPanelHeader = await waitFor(() =>
      renderResult.getByTestId(LEGEND_STUDIO_TEST_ID.EDIT_PANEL__HEADER_TABS),
    );
    await waitFor(() => getByText(editPanelHeader, 'MyMap'));
    const mappingExplorer = await waitFor(() =>
      renderResult.getByTestId(LEGEND_STUDIO_TEST_ID.MAPPING_EXPLORER),
    );
    await waitFor(() => getByText(mappingExplorer, 'Enum_1'));
    await waitFor(() => getByText(mappingExplorer, 'Enum_2'));
    // open Enum_1 [enumToEnum] enumeration mapping
    await waitFor(() => getByText(mappingExplorer, 'Enum_1 [enumToEnum]'));
    fireEvent.click(getByText(mappingExplorer, 'Enum_1 [enumToEnum]'));
    // Enum_1 [enumToEnum] mapping source values
    let sourcePanel = await waitFor(() =>
      renderResult.getByTestId(LEGEND_STUDIO_TEST_ID.SOURCE_PANEL),
    );
    await waitFor(() => getByText(sourcePanel, 'Enum_2'));
    await waitFor(() => getByText(sourcePanel, 'zero'));
    await waitFor(() => getByText(sourcePanel, 'one'));
    let mainEditor = await waitFor(() =>
      renderResult.getByTestId(LEGEND_STUDIO_TEST_ID.MAIN_EDITOR),
    );
    // Enum_1 [enumToEnum] mapping source value labels
    await waitFor(() => getByText(mainEditor, '_0'));
    await waitFor(() => getByText(mainEditor, '_1'));
    // Enum_1 [enumToEnum] mapping inputs
    expect(
      await waitFor(() => mainEditor.querySelector(`input[value="zero"]`)),
    ).not.toBeNull();
    expect(
      await waitFor(() => mainEditor.querySelector(`input[value="one"]`)),
    ).not.toBeNull();
    // Enum_1 [enumToEnum] mapping input return types
    let returnTypes = await waitFor(() => getAllByText(mainEditor, 'Enum_2'));
    expect(returnTypes).toHaveLength(4);
    // open Enum_2 enumeration mapping
    fireEvent.click(getByText(mappingExplorer, 'Enum_2'));
    sourcePanel = await waitFor(() =>
      renderResult.getByTestId(LEGEND_STUDIO_TEST_ID.SOURCE_PANEL),
    );
    await waitFor(() => getByText(sourcePanel, 'String'));
    mainEditor = await waitFor(() =>
      renderResult.getByTestId(LEGEND_STUDIO_TEST_ID.MAIN_EDITOR),
    );
    // Enum_2 mapping source value labels
    await waitFor(() => getByText(mainEditor, 'one'));
    await waitFor(() => getByText(mainEditor, 'zero'));
    // Enum_2 mapping inputs
    expect(
      await waitFor(() => mainEditor.querySelector(`input[value="0"]`)),
    ).not.toBeNull();
    expect(
      await waitFor(() => mainEditor.querySelector(`input[value="1"]`)),
    ).not.toBeNull();
    // Enum_2 mapping input return types
    returnTypes = await waitFor(() => getAllByText(mainEditor, 'String'));
    expect(returnTypes).toHaveLength(4);
    // open Enum_1 enumeration mapping
    fireEvent.click(getByText(mappingExplorer, 'Enum_1'));
    sourcePanel = await waitFor(() =>
      renderResult.getByTestId(LEGEND_STUDIO_TEST_ID.SOURCE_PANEL),
    );
    await waitFor(() => getByText(sourcePanel, 'String'));
    mainEditor = await waitFor(() =>
      renderResult.getByTestId(LEGEND_STUDIO_TEST_ID.MAIN_EDITOR),
    );
    // Enum_1 mapping source value labels
    await waitFor(() => getByText(mainEditor, '_0'));
    await waitFor(() => getByText(mainEditor, '_1'));
    // Enum_1 mapping inputs
    expect(
      await waitFor(() => mainEditor.querySelector(`input[value="false"]`)),
    ).not.toBeNull();
    expect(
      await waitFor(() => mainEditor.querySelector(`input[value="0"]`)),
    ).not.toBeNull();
    expect(
      await waitFor(() => mainEditor.querySelector(`input[value="true"]`)),
    ).not.toBeNull();
    expect(
      await waitFor(() => mainEditor.querySelector(`input[value="1"]`)),
    ).not.toBeNull();
    // Enum_1 mapping input return types
    returnTypes = await waitFor(() => getAllByText(mainEditor, 'String'));
    expect(returnTypes).toHaveLength(6);

    // test tabs
    const mappingEditorState =
      mockedEditorStore.getCurrentEditorState(MappingEditorState);
    expect(mappingEditorState.openedTabStates).toHaveLength(3);
    const mappingTabs = await waitFor(() =>
      renderResult.getByTestId(LEGEND_STUDIO_TEST_ID.EDITOR__TABS__HEADER),
    );
    fireEvent.click(getByText(mappingTabs, 'Enum_1 [enumToEnum]'));
    mainEditor = await waitFor(() =>
      renderResult.getByTestId(LEGEND_STUDIO_TEST_ID.MAIN_EDITOR),
    );
    await waitFor(() => getAllByText(mainEditor, 'Enum_2'));
    // close
    fireEvent.click(getAllByTitle(mappingTabs, 'Close')[0] as HTMLElement);
    expect(mappingEditorState.openedTabStates).toHaveLength(2);
    fireEvent.click(getAllByTitle(mappingTabs, 'Close')[0] as HTMLElement);
    expect(mappingEditorState.openedTabStates).toHaveLength(1);
    fireEvent.click(getAllByTitle(mappingTabs, 'Close')[0] as HTMLElement);
    // assert no current tab state
    expect(mappingEditorState.openedTabStates).toHaveLength(0);
    expect(mappingEditorState.currentTabState).toBeUndefined();
  },
);
Example #8
Source File: UMLEditor.test.tsx    From legend-studio with Apache License 2.0
test(
  integrationTest('Class editor without constraints and derived properties'),
  async () => {
    await TEST__openElementFromExplorerTree('ui::TestClass', renderResult);
    const editPanelHeader = renderResult.getByTestId(
      LEGEND_STUDIO_TEST_ID.EDIT_PANEL__HEADER_TABS,
    );
    expect(getByText(editPanelHeader, 'TestClass')).not.toBeNull();
    const classForm = renderResult.getByTestId(
      LEGEND_STUDIO_TEST_ID.CLASS_FORM_EDITOR,
    );
    // Normal properties
    const classProperties = ['a', 'b', 'name', 'person'];
    classProperties.forEach((t) =>
      expect(getByDisplayValue(classForm, t)).not.toBeNull(),
    );
    // Supertype properties
    const superTypeProperties = [
      'legs',
      'arms',
      'planet',
      'description',
      'founder',
    ];
    superTypeProperties.forEach((superTypeProperty) => {
      // input fields for supertype property names are not present/disabled
      expect(queryByDisplayValue(classForm, superTypeProperty)).toBeNull();
      expect(queryByText(classForm, superTypeProperty)).not.toBeNull();
    });
    // Association properties
    const associationProperties = ['testClassSibling'];
    associationProperties.forEach((associationProperty) => {
      // input fields for association property names are not present/disabled
      expect(queryByDisplayValue(classForm, associationProperty)).toBeNull();
      expect(queryByText(classForm, associationProperty)).not.toBeNull();
    });
    // SuperTypes
    fireEvent.click(getByText(classForm, 'Super Types'));
    await waitFor(() => getByText(classForm, 'Animal'));
    // TaggedValues
    fireEvent.click(getByText(classForm, 'Tagged Values'));
    await waitFor(() => getByText(classForm, 'ProfileTest'));
    expect(getByText(classForm, 'tag1')).not.toBeNull();
    expect(getByDisplayValue(classForm, 'test')).not.toBeNull();
    // Stereotypes
    fireEvent.click(getByText(classForm, 'Stereotypes'));
    await waitFor(() => getByText(classForm, 'ProfileTest'));
    expect(getByText(classForm, 'stereotype1')).not.toBeNull();
    // Back to properties. Test them more rigorously.
    fireEvent.click(getByText(classForm, 'Properties'));
    await waitFor(() => getByText(classForm, 'founder'));
    const inputA = getByDisplayValue(classForm, 'a');
    const propertyA = inputA.parentElement?.parentElement
      ?.parentElement as HTMLElement;
    fireEvent.change(inputA, { target: { value: 'abcdefg' } });
    await waitFor(() => getByDisplayValue(classForm, 'abcdefg'));
    expect(getAllByDisplayValue(propertyA, '1')).toHaveLength(2);
    expect(getByText(propertyA, 'String')).not.toBeNull();
    expect(getAllByRole(propertyA, 'button')).toHaveLength(2);
    fireEvent.click(guaranteeNonNullable(getAllByRole(propertyA, 'button')[1]));
    expect(queryByDisplayValue(classForm, 'abcdefg')).toBeNull();
    // Sub Panel Property
    const inputB = getByDisplayValue(classForm, 'b');
    const propertyB = inputB.parentElement?.parentElement
      ?.parentElement as HTMLElement;
    const buttons = getAllByRole(propertyB, 'button');
    expect(buttons).toHaveLength(2);
    expect(queryByDisplayValue(classForm, 'ProfileTest')).toBeNull();
    const navigateToPropertyButton = guaranteeNonNullable(buttons[0]);
    fireEvent.click(navigateToPropertyButton);
    await waitFor(() => getByText(classForm, 'property'));
    const subPropertyPanel = getByTestId(
      classForm,
      LEGEND_STUDIO_TEST_ID.PANEL,
    );
    expect(
      getByDisplayValue(subPropertyPanel, 'lets write a tag'),
    ).not.toBeNull();
    expect(getAllByText(subPropertyPanel, 'tag2')).not.toBeNull();
    expect(getByText(subPropertyPanel, 'ProfileTest')).not.toBeNull();
    fireEvent.click(getByText(subPropertyPanel, 'Stereotypes'));
    await waitFor(() => getByText(subPropertyPanel, 'stereotype1'));
    fireEvent.click(
      guaranteeNonNullable(getAllByRole(subPropertyPanel, 'button')[0]),
    );
    expect(queryByRole(classForm, 'panel')).toBeNull();
  },
);