Python sqlalchemy.types.NullType() Examples
The following are 20 code examples of sqlalchemy.types.NullType().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module sqlalchemy.types, or try the search function.
Example #1
Source File: compare.py From android_universal with MIT License | 6 votes |
def _compare_type(
        autogen_context, alter_column_op, schema, tname, cname,
        conn_col, metadata_col):
    """Compare the reflected column type against the model's column type,
    recording a type-change operation on ``alter_column_op`` if they differ.
    """
    reflected_type = conn_col.type
    alter_column_op.existing_type = reflected_type
    model_type = metadata_col.type

    # NullType on either side means the type is unknown; skip the comparison.
    if reflected_type._type_affinity is sqltypes.NullType:
        log.info("Couldn't determine database type "
                 "for column '%s.%s'", tname, cname)
        return
    if model_type._type_affinity is sqltypes.NullType:
        log.info("Column '%s.%s' has no type within "
                 "the model; can't compare", tname, cname)
        return

    if autogen_context.migration_context._compare_type(
            conn_col, metadata_col):
        alter_column_op.modify_type = model_type
        log.info("Detected type change from %r to %r on '%s.%s'",
                 reflected_type, model_type, tname, cname)
Example #2
Source File: test_text.py From sqlalchemy with MIT License | 6 votes |
def test_typing_construction(self):
    """Binding params one at a time replaces NullType with a real type."""
    stmt = text("select * from table :foo :bar :bat")
    # All three parameters start out untyped.
    self._assert_type_map(
        stmt, {"foo": NullType(), "bar": NullType(), "bat": NullType()}
    )
    stmt = stmt.bindparams(bindparam("foo", type_=String))
    self._assert_type_map(
        stmt, {"foo": String(), "bar": NullType(), "bat": NullType()}
    )
    stmt = stmt.bindparams(bindparam("bar", type_=Integer))
    self._assert_type_map(
        stmt, {"foo": String(), "bar": Integer(), "bat": NullType()}
    )
    # A literal value infers its type (Python float -> Float).
    stmt = stmt.bindparams(bat=45.564)
    self._assert_type_map(
        stmt, {"foo": String(), "bar": Integer(), "bat": Float()}
    )
Example #3
Source File: test_column_loading.py From sqlalchemy-redshift with MIT License | 6 votes |
def test_varchar_as_nulltype(self):
    """Varchar columns with no length should be considered NullType columns."""
    dialect = RedshiftDialect()
    # No length given -> type can't be represented, falls back to NullType.
    info_no_length = dialect._get_column_info(
        'Null Column', 'character varying', None, False,
        {}, {}, 'default', 'test column')
    assert isinstance(info_no_length['type'], NullType)
    # With an explicit length the column reflects as VARCHAR.
    info_with_length = dialect._get_column_info(
        'character column', 'character varying(30)', None, False,
        {}, {}, 'default', comment='test column')
    assert isinstance(info_with_length['type'], VARCHAR)
Example #4
Source File: test_types.py From sqlalchemy with MIT License | 6 votes |
def test_date_coercion(self):
    """Date/time arithmetic coerces the result type as expected."""
    # NULLTYPE minus TIMESTAMP: nothing known to coerce from -> NullType.
    delta = column("bar", types.NULLTYPE) - column("foo", types.TIMESTAMP)
    eq_(delta.type._type_affinity, types.NullType)
    # Date-returning SQL functions minus a TIMESTAMP yield an Interval.
    delta = func.sysdate() - column("foo", types.TIMESTAMP)
    eq_(delta.type._type_affinity, types.Interval)
    delta = func.current_date() - column("foo", types.TIMESTAMP)
    eq_(delta.type._type_affinity, types.Interval)
Example #5
Source File: test_sqlite.py From sqlalchemy with MIT License | 6 votes |
def _type_affinity_fixture(self):
    """Pairs of (declared SQLite type string, expected reflected type).

    Mirrors SQLite's type-affinity rules: substrings like INT, CHAR/CLOB,
    BLOB and REAL drive the mapping; unrecognized names fall through to
    NUMERIC, and bare BLOB-ish unknowns to NullType.
    """
    expectations = (
        ("LONGTEXT", sqltypes.TEXT),
        ("TINYINT", sqltypes.INTEGER),
        ("MEDIUMINT", sqltypes.INTEGER),
        ("INT2", sqltypes.INTEGER),
        ("UNSIGNED BIG INT", sqltypes.INTEGER),
        ("INT8", sqltypes.INTEGER),
        ("CHARACTER(20)", sqltypes.TEXT),
        ("CLOB", sqltypes.TEXT),
        ("CLOBBER", sqltypes.TEXT),
        ("VARYING CHARACTER(70)", sqltypes.TEXT),
        ("NATIVE CHARACTER(70)", sqltypes.TEXT),
        ("BLOB", sqltypes.BLOB),
        ("BLOBBER", sqltypes.NullType),
        ("DOUBLE PRECISION", sqltypes.REAL),
        ("FLOATY", sqltypes.REAL),
        ("SOMETHING UNKNOWN", sqltypes.NUMERIC),
    )
    return [(name, type_cls()) for name, type_cls in expectations]
Example #6
Source File: compare.py From jbox with MIT License | 6 votes |
def _compare_type(
        autogen_context, alter_column_op, schema, tname, cname,
        conn_col, metadata_col):
    """Record a type-change op on ``alter_column_op`` when the database
    column type differs from the metadata (model) column type."""
    db_type = conn_col.type
    alter_column_op.existing_type = db_type
    model_type = metadata_col.type

    # NullType means "type unknown" on either side -> nothing to compare.
    for typ, msg in (
        (db_type,
         "Couldn't determine database type " "for column '%s.%s'"),
        (model_type,
         "Column '%s.%s' has no type within " "the model; can't compare"),
    ):
        if typ._type_affinity is sqltypes.NullType:
            log.info(msg, tname, cname)
            return

    isdiff = autogen_context.migration_context._compare_type(
        conn_col, metadata_col)
    if isdiff:
        alter_column_op.modify_type = model_type
        log.info("Detected type change from %r to %r on '%s.%s'",
                 db_type, model_type, tname, cname)
Example #7
Source File: test_reflection.py From sqlalchemy with MIT License | 5 votes |
def test_skip_types(self):
    """Columns whose type is absent from ischema_names reflect as NullType."""
    metadata = self.metadata
    with testing.db.connect() as conn:
        conn.exec_driver_sql(
            "create table foo (id integer primary key, data xml)"
        )
    # Restrict the dialect's known types so that 'xml' is unrecognized.
    with mock.patch.object(
        testing.db.dialect, "ischema_names", {"int": mssql.INTEGER}
    ):
        reflected = Table("foo", metadata, autoload=True)
        assert isinstance(reflected.c.id.type, Integer)
        assert isinstance(reflected.c.data.type, types.NullType)
Example #8
Source File: convert.py From dvhb-hybrid with MIT License | 5 votes |
def Geometry(*args, **kwargs):
    """Stand-in for a geometry column type: all arguments are accepted
    and ignored, and an untyped ``NullType`` instance is returned."""
    return sa_types.NullType()
Example #9
Source File: test_external_traversal.py From sqlalchemy with MIT License | 5 votes |
def _compare_param_dict(self, a, b):
    """Compare a dict of BindParameter objects (``a``) against a dict of
    plain literal values (``b``) with the same keys."""
    if list(a) != list(b):
        return False

    from sqlalchemy.types import NullType

    for key, bound in a.items():
        # compare BindParameter on the left to literal value on the right
        assert bound.compare(literal(b[key], type_=NullType()))
Example #10
Source File: test_types.py From sqlalchemy with MIT License | 5 votes |
def test_null_comparison(self):
    """Adding two NullType columns renders plainly, with no coercion."""
    lhs = column("a", types.NullType())
    rhs = column("b", types.NullType())
    eq_(str(lhs + rhs), "a + b")
Example #11
Source File: test_types.py From sqlalchemy with MIT License | 5 votes |
def test_bind_typing(self):
    """An untyped bind adopts the type of the typed side of an expression."""
    from sqlalchemy.sql import column

    class MyFoobarType(types.UserDefinedType):
        pass

    class Foo(object):
        pass

    # unknown type + integer, right hand bind coerces to given type
    binexp = column("foo", MyFoobarType) + 5
    assert binexp.right.type._type_affinity is MyFoobarType

    # untyped bind - it gets assigned MyFoobarType
    bp = bindparam("foo")
    binexp = column("foo", MyFoobarType) + bp
    assert bp.type._type_affinity is types.NullType  # noqa
    assert binexp.right.type._type_affinity is MyFoobarType

    # an explicitly typed bind keeps its own type
    binexp = column("foo", MyFoobarType) + bindparam("foo", type_=Integer)
    assert binexp.right.type._type_affinity is types.Integer

    # unknown type + unknown, right hand bind coerces to the left
    binexp = column("foo", MyFoobarType) + Foo()
    assert binexp.right.type._type_affinity is MyFoobarType

    # including for non-commutative ops
    binexp = column("foo", MyFoobarType) - Foo()
    assert binexp.right.type._type_affinity is MyFoobarType
    binexp = column("foo", MyFoobarType) - datetime.date(2010, 8, 25)
    assert binexp.right.type._type_affinity is MyFoobarType
Example #12
Source File: test_types.py From sqlalchemy with MIT License | 5 votes |
def test_bind_adapt_insert(self):
    """An untyped bindparam picks up the column type when an INSERT
    statement is compiled, without mutating the original object."""
    untyped = bindparam("somevalue")
    compiled = test_table.insert().values(avalue=untyped).compile()
    # The original bindparam stays untyped...
    eq_(untyped.type._type_affinity, types.NullType)
    # ...while the compiled statement's copy adopted the column's type.
    eq_(compiled.binds["somevalue"].type._type_affinity, MyCustomType)
Example #13
Source File: test_types.py From sqlalchemy with MIT License | 5 votes |
def test_bind_adapt_update(self):
    """An untyped bindparam picks up the column type when an UPDATE
    statement is compiled, without mutating the original object."""
    untyped = bindparam("somevalue")
    compiled = test_table.update().values(avalue=untyped).compile()
    # The original bindparam stays untyped...
    eq_(untyped.type._type_affinity, types.NullType)
    # ...while the compiled statement's copy adopted the column's type.
    eq_(compiled.binds["somevalue"].type._type_affinity, MyCustomType)
Example #14
Source File: base.py From sqlalchemy-clickhouse with Apache License 2.0 | 5 votes |
def get_columns(self, connection, table_name, schema=None, **kw):
    """Return column metadata dicts for *table_name*.

    Each entry carries ``name``, ``type`` (a SQLAlchemy type class, or
    ``sqltypes.NullType`` when the ClickHouse type is unrecognized),
    ``nullable`` and ``default`` keys.
    """
    rows = self._get_table_columns(connection, table_name, schema)
    result = []
    for r in rows:
        col_name = r.name
        col_type = ""
        if r.type.startswith("AggregateFunction"):
            # The type from ClickHouse looks like
            # AggregateFunction(sum, Int64) for an Int64 column: the
            # stored value's type is the *last* argument, so take
            # everything after the final comma. (The previous fixed-offset
            # slice r.type[23:-1] only worked for 3-character aggregate
            # function names such as "sum".)
            col_type = r.type[r.type.rfind(',') + 1:-1].strip()
        else:
            # Take out the more detailed type information,
            # e.g. 'map<int,int>' -> 'map', 'decimal(10,1)' -> 'decimal'
            col_type = re.search(r'^\w+', r.type).group(0)
        try:
            coltype = ischema_names[col_type]
        except KeyError:
            coltype = sqltypes.NullType
        result.append({
            'name': col_name,
            'type': coltype,
            'nullable': True,
            'default': None,
        })
    return result
Example #15
Source File: compare.py From alembic with MIT License | 5 votes |
def _compare_type(
    autogen_context,
    alter_column_op,
    schema,
    tname,
    cname,
    conn_col,
    metadata_col,
):
    """Compare reflected vs. declared column types, populating
    ``alter_column_op.modify_type`` when a change is detected."""
    reflected = conn_col.type
    alter_column_op.existing_type = reflected
    declared = metadata_col.type

    # Either side being NullType means its type is unknown; bail out.
    if reflected._type_affinity is sqltypes.NullType:
        log.info(
            "Couldn't determine database type " "for column '%s.%s'",
            tname,
            cname,
        )
        return
    if declared._type_affinity is sqltypes.NullType:
        log.info(
            "Column '%s.%s' has no type within " "the model; can't compare",
            tname,
            cname,
        )
        return

    if autogen_context.migration_context._compare_type(conn_col, metadata_col):
        alter_column_op.modify_type = declared
        log.info(
            "Detected type change from %r to %r on '%s.%s'",
            reflected,
            declared,
            tname,
            cname,
        )
Example #16
Source File: mssql.py From alembic with MIT License | 5 votes |
def create_index(self, index):
    """Emit CREATE INDEX, first materializing any ``mssql_include``
    columns that do not yet exist on the table as typeless placeholders."""
    # this likely defaults to None if not present, so get()
    # should normally not return the default value. being
    # defensive in any case
    included = index.kwargs.get("mssql_include", None) or ()
    for col_name in (c for c in included if c not in index.table.c):
        index.table.append_column(Column(col_name, sqltypes.NullType))
    self._exec(CreateIndex(index))
Example #17
Source File: _schemas.py From omniduct with MIT License | 5 votes |
def get_columns(self, connection, table_name, schema=None, **kw):
    """Return column metadata for *table_name*.

    Extends the types supported by PrestoDialect as defined in PyHive;
    types not present in the map fall back to ``NullType`` with a warning.
    """
    type_map = {
        'bigint': sql_types.BigInteger,
        'integer': sql_types.Integer,
        'boolean': sql_types.Boolean,
        'double': sql_types.Float,
        'varchar': sql_types.String,
        'timestamp': sql_types.TIMESTAMP,
        'date': sql_types.DATE,
        'array<bigint>': sql_types.ARRAY(sql_types.Integer),
        'array<varchar>': sql_types.ARRAY(sql_types.String)
    }

    rows = self._get_table_columns(connection, table_name, schema)
    result = []
    for row in rows:
        try:
            coltype = type_map[row.Type]
        except KeyError:
            # logger.warn() is a deprecated alias of warning(); use
            # warning() with lazy %-style args (same rendered message).
            logger.warning("Did not recognize type '%s' of column '%s'",
                           row.Type, row.Column)
            coltype = sql_types.NullType
        result.append({
            'name': row.Column,
            'type': coltype,
            # newer Presto no longer includes this column
            'nullable': getattr(row, 'Null', True),
            'default': None,
        })
    return result
Example #18
Source File: dml.py From sqlalchemy with MIT License | 4 votes |
def ordered_values(self, *args):
    """Specify the VALUES clause of this UPDATE statement with an explicit
    parameter ordering that will be maintained in the SET clause of the
    resulting UPDATE statement.

    E.g.::

        stmt = table.update().ordered_values(
            ("name", "ed"), ("ident", "foo")
        )

    .. seealso::

       :ref:`updates_order_parameters` - full example of the
       :meth:`_expression.Update.ordered_values` method.

    .. versionchanged:: 1.4 The :meth:`_expression.Update.ordered_values`
       method supersedes the
       :paramref:`_expression.update.preserve_parameter_order` parameter,
       which will be removed in SQLAlchemy 2.0.

    """
    # (Docstring example fixed: the original showed ("ident": "foo"),
    # which is not valid Python tuple syntax.)
    if self._values:
        raise exc.ArgumentError(
            "This statement already has values present"
        )
    elif self._ordered_values:
        raise exc.ArgumentError(
            "This statement already has ordered values present"
        )

    # Coerce each (column, value) pair; values are given NullType so the
    # compiler infers the real type from the target column.
    arg = [
        (
            coercions.expect(roles.DMLColumnRole, k),
            coercions.expect(
                roles.ExpressionElementRole,
                v,
                type_=NullType(),
                is_crud=True,
            ),
        )
        for k, v in args
    ]
    self._ordered_values = arg
Example #19
Source File: utils.py From oslo.db with Apache License 2.0 | 4 votes |
def _change_deleted_column_type_to_id_type_sqlite(engine, table_name,
                                                  **col_name_col_instance):
    """Rebuild *table_name* so its ``deleted`` column uses the same type
    as the ``id`` column (soft-delete-by-id convention), on SQLite.

    SQLite cannot alter column types or drop check constraints in place,
    hence the create-copy-drop-rename dance described below.
    ``col_name_col_instance`` supplies replacement Column objects for any
    columns whose type could not be reflected (NullType).
    """
    # NOTE(boris-42): sqlaclhemy-migrate can't drop column with check
    #                 constraints in sqlite DB and our `deleted` column has
    #                 2 check constraints. So there is only one way to remove
    #                 these constraints:
    #                 1) Create new table with the same columns, constraints
    #                 and indexes. (except deleted column).
    #                 2) Copy all data from old to new table.
    #                 3) Drop old table.
    #                 4) Rename new table to old table name.
    meta = MetaData(bind=engine)
    table = Table(table_name, meta, autoload=True)
    default_deleted_value = _get_default_deleted_value(table)
    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # Unreflectable type: the caller must supply a usable
                # replacement column via **col_name_col_instance.
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            # Re-create `deleted` with the id column's type.
            column_copy = Column('deleted', table.c.id.type,
                                 default=default_deleted_value)
        columns.append(column_copy)
    constraints = []
    for constraint in table.constraints:
        # Skip the CHECK constraints tied to the old boolean `deleted`.
        if not _is_deleted_column_constraint(constraint):
            constraints.append(constraint.copy())
    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()
    indexes = []
    for index in get_indexes(engine, table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))
    # NOTE(review): no explicit data copy from the old table is visible
    # here before table.drop(); presumably handled elsewhere in the full
    # original source -- confirm before relying on this excerpt.
    table.drop()
    for index in indexes:
        index.create(engine)
    new_table.rename(table_name)
    deleted = True  # workaround for pyflakes
    # Rows previously flagged deleted=True now store their own id.
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=new_table.c.id).\
        execute()
    # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
    deleted = False  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=default_deleted_value).\
        execute()
Example #20
Source File: utils.py From oslo.db with Apache License 2.0 | 4 votes |
def _change_deleted_column_type_to_boolean_sqlite(engine, table_name,
                                                  **col_name_col_instance):
    """Rebuild *table_name* so its ``deleted`` column becomes a Boolean
    (True where deleted == id, i.e. the soft-deleted rows), on SQLite.

    SQLite cannot alter a column's type in place, so the table is
    re-created as ``<name>__tmp__`` and renamed back.
    ``col_name_col_instance`` supplies replacement Column objects for any
    columns whose type could not be reflected (NullType).
    """
    table = get_table(engine, table_name)
    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # Unreflectable type: the caller must supply a usable
                # replacement column via **col_name_col_instance.
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            # Re-create `deleted` as a Boolean defaulting to 0 (False).
            column_copy = Column('deleted', Boolean, default=0)
        columns.append(column_copy)
    constraints = [constraint.copy() for constraint in table.constraints]
    meta = table.metadata
    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()
    indexes = []
    for index in get_indexes(engine, table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))
    # Select list translating deleted-by-id into a boolean expression.
    c_select = []
    for c in table.c:
        if c.name != "deleted":
            c_select.append(c)
        else:
            c_select.append(table.c.deleted == table.c.id)
    # NOTE(review): c_select is built but not used in the visible code --
    # presumably fed to an INSERT ... SELECT in the full original source;
    # confirm before relying on this excerpt.
    table.drop()
    for index in indexes:
        index.create(engine)
    new_table.rename(table_name)
    # Rows whose deleted column still holds their own id become True.
    new_table.update().\
        where(new_table.c.deleted == new_table.c.id).\
        values(deleted=True).\
        execute()