-rw-r--r--  doc/build/content/mappers.txt  451
1 file changed, 225 insertions(+), 226 deletions(-)
diff --git a/doc/build/content/mappers.txt b/doc/build/content/mappers.txt
index 11c038252..3e9156335 100644
--- a/doc/build/content/mappers.txt
+++ b/doc/build/content/mappers.txt
@@ -24,64 +24,63 @@ To load only a part of the columns referenced by a table as attributes, use the
     {python}
         mapper(User, users_table, include_properties=['user_id', 'user_name'])
-        
+
         mapper(Address, addresses_table, exclude_properties=['street', 'city', 'state', 'zip'])
-        
+
 To change the name of the attribute mapped to a particular column, place the `Column` object in the `properties` dictionary with the desired key:
     {python}
-        mapper(User, users_table, properties={
-           'id' : users_table.c.user_id,
-           'name' : users_table.c.user_name,
+        mapper(User, users_table, properties={
+            'id': users_table.c.user_id,
+            'name': users_table.c.user_name,
         })
 To change the names of all attributes using a prefix, use the `column_prefix` option. This is useful for classes which wish to add their own `property` accessors:
     {python}
         mapper(User, users_table, column_prefix='_')
-        
+
 The above will place attribute names such as `_user_id`, `_user_name`, `_password` etc. on the mapped `User` class.
-        
+
 To place multiple columns which are known to be "synonymous" based on foreign key relationship or join condition into the same mapped attribute, put them together using a list, as below where we map to a `Join`:
     {python}
         # join users and addresses
         usersaddresses = sql.join(users_table, addresses_table, \
            users_table.c.user_id == addresses_table.c.user_id)
-        
-        mapper(User, usersaddresses,
-            properties = {
-            'id':[users_table.c.user_id, addresses_table.c.user_id],
-            })
+
+        mapper(User, usersaddresses, properties={
+            'id':[users_table.c.user_id, addresses_table.c.user_id],
+        })
 #### Deferred Column Loading {@name=deferred}
 This feature allows particular columns of a table to not be loaded by default, instead being loaded later on when first referenced. It is essentially "column-level lazy loading". This feature is useful when one wants to avoid loading a large text or binary field into memory when it's not needed. Individual columns can be lazy loaded by themselves or placed into groups that lazy-load together.
     {python}
-        book_excerpts = Table('books', db, 
+        book_excerpts = Table('books', db,
            Column('book_id', Integer, primary_key=True),
            Column('title', String(200), nullable=False),
            Column('summary', String(2000)),
            Column('excerpt', String),
            Column('photo', Binary)
-            )
+        )
        class Book(object):
            pass
-        # define a mapper that will load each of 'excerpt' and 'photo' in 
+        # define a mapper that will load each of 'excerpt' and 'photo' in
        # separate, individual-row SELECT statements when each attribute
        # is first referenced on the individual object instance
-        mapper(Book, book_excerpts, properties = {
-           'excerpt' : deferred(book_excerpts.c.excerpt),
-           'photo' : deferred(book_excerpts.c.photo)
+        mapper(Book, book_excerpts, properties={
+            'excerpt': deferred(book_excerpts.c.excerpt),
+            'photo': deferred(book_excerpts.c.photo)
        })
 Deferred columns can be placed into groups so that they load together:
     {python}
-        book_excerpts = Table('books', db, 
+        book_excerpts = Table('books', db,
            Column('book_id', Integer, primary_key=True),
            Column('title', String(200), nullable=False),
            Column('summary', String(2000)),
@@ -95,13 +94,13 @@ Deferred columns can be placed into groups so that they load together:
            pass
        # define a mapper with a 'photos' deferred group. when one photo is referenced,
-        # all three photos will be loaded in one SELECT statement. The 'excerpt' will 
+        # all three photos will be loaded in one SELECT statement. The 'excerpt' will
        # be loaded separately when it is first referenced.
        mapper(Book, book_excerpts, properties = {
-           'excerpt' : deferred(book_excerpts.c.excerpt),
-           'photo1' : deferred(book_excerpts.c.photo1, group='photos'),
-           'photo2' : deferred(book_excerpts.c.photo2, group='photos'),
-           'photo3' : deferred(book_excerpts.c.photo3, group='photos')
+            'excerpt': deferred(book_excerpts.c.excerpt),
+            'photo1': deferred(book_excerpts.c.photo1, group='photos'),
+            'photo2': deferred(book_excerpts.c.photo2, group='photos'),
+            'photo3': deferred(book_excerpts.c.photo3, group='photos')
        })
 You can defer or undefer columns at the `Query` level using the `defer` and `undefer` options:
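For illustration, that query-level form might look like the following sketch (not part of this changeset; it assumes the `Book` mapping above and a configured `session`):

    {python}
        from sqlalchemy.orm import defer, undefer

        # defer a column that would ordinarily load with the row
        query = session.query(Book).options(defer('summary'))

        # or force the deferred 'excerpt' column to load up front
        query = session.query(Book).options(undefer('excerpt'))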
@@ -123,23 +122,23 @@ To add a SQL clause composed of local or external columns as a read-only, mapped
     {python}
        mapper(User, users_table, properties={
-            'fullname' : column_property(
+            'fullname': column_property(
                (users_table.c.firstname + " " + users_table.c.lastname).label('fullname')
            )
        })
 Correlated subqueries may be used as well:
-    
+
     {python}
        mapper(User, users_table, properties={
-            'address_count' : column_property(
+            'address_count': column_property(
                select(
-                    [func.count(addresses_table.c.address_id)], 
+                    [func.count(addresses_table.c.address_id)],
                    addresses_table.c.user_id==users_table.c.user_id
                ).label('address_count')
            )
        })
-    
+
 #### Overriding Attribute Behavior with Synonyms {@name=overriding}
 A common request is the ability to create custom class properties that override the behavior of setting/getting an attribute. As of 0.4.2, the `synonym()` construct provides an easy way to do this in conjunction with normal Python `property` constructs. Below, we re-map the `email` column of our mapped table to a custom attribute setter/getter, mapping the actual column to the property named `_email`:
@@ -151,9 +150,9 @@ A common request is the ability to create custom class properties that override
        def _get_email(self):
            return self._email
        email = property(_get_email, _set_email)
-    
-    mapper(MyAddress, addresses_table, properties = {
-        'email':synonym('_email', map_column=True)
+
+    mapper(MyAddress, addresses_table, properties={
+        'email': synonym('_email', map_column=True)
    })
 The `email` attribute is now usable in the same way as any other mapped attribute, including filter expressions, get/set operations, etc.:
@@ -163,7 +162,7 @@ The `email` attribute is now usable in the same way as any other mapped attribut
    address.email = 'some other address'
    sess.flush()
-    
+
    q = sess.query(MyAddress).filter_by(email='some other address')
 If the mapped class does not provide a property, the `synonym()` construct will create a default getter/setter object automatically.
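A minimal sketch of that automatic behavior, assuming the same `addresses_table` (nothing here is part of the diff itself):

    {python}
        class MyAddress(object):
            pass

        mapper(MyAddress, addresses_table, properties={
            'email': synonym('_email', map_column=True)
        })

        a = MyAddress()
        a.email = 'x@y.com'            # the default synonym proxies to '_email'
        assert a._email == 'x@y.com'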
@@ -173,7 +172,7 @@ If the mapped class does not provide a property, the `synonym()` construct will
 Sets of columns can be associated with a single datatype. The ORM treats the group of columns like a single column which accepts and returns objects using the custom datatype you provide. In this example, we'll create a table `vertices` which stores a pair of x/y coordinates, and a custom datatype `Point` which is a composite type of an x and y column:
     {python}
-        vertices = Table('vertices', metadata, 
+        vertices = Table('vertices', metadata,
            Column('id', Integer, primary_key=True),
            Column('x1', Integer),
            Column('y1', Integer),
@@ -182,14 +181,14 @@ Sets of columns can be associated with a single datatype. The ORM treats the gr
        )
 The requirements for the custom datatype class are that it have a constructor which accepts positional arguments corresponding to its column format, and also provides a method `__composite_values__()` which returns the state of the object as a list or tuple, in order of its column-based attributes. It also should supply adequate `__eq__()` and `__ne__()` methods which test the equality of two instances, and may optionally provide a `__set_composite_values__` method which is used to set internal state in some cases (typically when default values have been generated during a flush):
-    
+
     {python}
        class Point(object):
            def __init__(self, x, y):
                self.x = x
                self.y = y
            def __composite_values__(self):
-                return [self.x, self.y]            
+                return [self.x, self.y]
            def __set_composite_values__(self, x, y):
                self.x = x
                self.y = y
@@ -206,10 +205,10 @@ Setting up the mapping uses the `composite()` function:
     {python}
        class Vertex(object):
            pass
-        
+
        mapper(Vertex, vertices, properties={
-            'start':composite(Point, vertices.c.x1, vertices.c.y1),
-            'end':composite(Point, vertices.c.x2, vertices.c.y2)
+            'start': composite(Point, vertices.c.x1, vertices.c.y1),
+            'end': composite(Point, vertices.c.x2, vertices.c.y2)
        })
 We can now use the `Vertex` instances as well as querying as though the `start` and `end` attributes are regular scalar attributes:
@@ -218,15 +217,15 @@ We can now use the `Vertex` instances as well as querying as though the `start`
    sess = Session()
    v = Vertex(Point(3, 4), Point(5, 6))
    sess.save(v)
-    
+
    v2 = sess.query(Vertex).filter(Vertex.start == Point(3, 4))
-    
+
 The "equals" comparison operation by default produces an AND of all corresponding columns equated to one another. If you'd like to override this, or define the behavior of other SQL operators for your new type, the `composite()` function accepts an extension object of type `sqlalchemy.orm.PropComparator`:
     {python}
        from sqlalchemy.orm import PropComparator
        from sqlalchemy import sql
-        
+
        class PointComparator(PropComparator):
            def __gt__(self, other):
                """define the 'greater than' operation"""
@@ -234,10 +233,10 @@ The "equals" comparison operation by default produces an AND of all correspondin
                return sql.and_(*[a>b for a, b in zip(self.prop.columns, other.__composite_values__())])
-        
+
        mapper(Vertex, vertices, properties={
-            'start':composite(Point, vertices.c.x1, vertices.c.y1, comparator=PointComparator),
-            'end':composite(Point, vertices.c.x2, vertices.c.y2, comparator=PointComparator)
+            'start': composite(Point, vertices.c.x1, vertices.c.y1, comparator=PointComparator),
+            'end': composite(Point, vertices.c.x2, vertices.c.y2, comparator=PointComparator)
        })
 #### Controlling Ordering {@name=orderby}
@@ -250,8 +249,8 @@ The "default" ordering for a collection, which applies to list-based collections
    mapper(Address, addresses_table)
    # order address objects by address id
-    mapper(User, users_table, properties = {
-        'addresses' : relation(Address, order_by=addresses_table.c.address_id)
+    mapper(User, users_table, properties={
+        'addresses': relation(Address, order_by=addresses_table.c.address_id)
    })
 Note that when using eager loaders with relations, the tables used by the eager load's join are anonymously aliased. You can only order by these columns if you specify it at the `relation()` level. To control ordering at the query level based on a related table, you `join()` to that relation, then order by it:
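A sketch of that query-level ordering, assuming the `User`/`Address` mapping above (illustration only, not part of this changeset):

    {python}
        # join to the 'addresses' relation, then order by a column of the joined table
        session.query(User).join('addresses').order_by(addresses_table.c.address_id).all()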
@@ -264,7 +263,7 @@ Ordering for rows loaded through `Query` is usually specified using the `order_b
     {python}
        # order by a column
        mapper(User, users_table, order_by=users_table.c.user_id)
-        
+
        # order by multiple items
        mapper(User, users_table, order_by=[users_table.c.user_id, users_table.c.user_name.desc()])
@@ -304,7 +303,7 @@ For the following sections, assume this class relationship:
 In joined table inheritance, each class along a particular class's list of parents is represented by a unique table. The total set of attributes for a particular instance is represented as a join along all tables in its inheritance path. Here, we first define a table to represent the `Employee` class. This table will contain a primary key column (or columns), and a column for each attribute that's represented by `Employee`. In this case it's just `name`:
     {python}
-        employees = Table('employees', metadata, 
+        employees = Table('employees', metadata,
           Column('employee_id', Integer, primary_key=True),
           Column('name', String(50)),
           Column('type', String(30), nullable=False)
@@ -315,12 +314,12 @@ The table also has a column called `type`. It is strongly advised in both singl
 Next we define individual tables for each of `Engineer` and `Manager`, which each contain columns that represent the attributes unique to the subclass they represent. Each table also must contain a primary key column (or columns), and in most cases a foreign key reference to the parent table. It is standard practice that the same column is used for both of these roles, and that the column is also named the same as that of the parent table. However this is optional in SQLAlchemy; separate columns may be used for primary key and parent-relation, the column may be named differently than that of the parent, and even a custom join condition can be specified between parent and child tables instead of using a foreign key. In joined table inheritance, the primary key of an instance is always represented by the primary key of the base table only (new in SQLAlchemy 0.4).
     {python}
-        engineers = Table('engineers', metadata, 
+        engineers = Table('engineers', metadata,
           Column('employee_id', Integer, ForeignKey('employees.employee_id'), primary_key=True),
           Column('engineer_info', String(50)),
        )
-        managers = Table('managers', metadata, 
+        managers = Table('managers', metadata,
           Column('employee_id', Integer, ForeignKey('employees.employee_id'), primary_key=True),
           Column('manager_data', String(50)),
        )
@@ -347,12 +346,12 @@ Selects only from the `employees` table. The criterion we use in `filter()` and
     {python}
        session.query(Employee).with_polymorphic(Engineer).filter(Engineer.engineer_info=='some info')
-        
+
 Even without criterion, the `with_polymorphic()` method has the added advantage that instances are loaded from all of their tables in one result set. For example, to optimize the loading of all `Employee` objects, `with_polymorphic()` accepts `'*'` as a wildcard indicating that all subclass tables should be joined:
     {python}
        session.query(Employee).with_polymorphic('*').all()
-        
+
 `with_polymorphic()` is an effective query-level alternative to the existing `select_table` option available on `mapper()`.
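Combining `with_polymorphic()` with criterion against several subclasses might look like this sketch (illustration only; `or_` is imported from the `sqlalchemy` package):

    {python}
        from sqlalchemy import or_

        session.query(Employee).with_polymorphic([Engineer, Manager]).\
            filter(or_(Engineer.engineer_info=='some info',
                       Manager.manager_data=='some data')).all()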
 Next is a way to join along `relation` paths while narrowing the criterion to specific subclasses. Suppose the `employees` table represents a collection of employees which are associated with a `Company` object. We'll add a `company_id` column to the `employees` table and a new table `companies`:
@@ -363,30 +362,30 @@ Next is a way to join along `relation` paths while narrowing the criterion to sp
           Column('name', String(50))
        )
-        employees = Table('employees', metadata, 
+        employees = Table('employees', metadata,
           Column('employee_id', Integer, primary_key=True),
           Column('name', String(50)),
           Column('type', String(30), nullable=False),
           Column('company_id', Integer, ForeignKey('companies.company_id'))
        )
-    
+
        class Company(object):
            pass
-        
+
        mapper(Company, companies, properties={
-            'employees':relation(Employee)
+            'employees': relation(Employee)
        })
-    
+
 If we wanted to join from `Company` to not just `Employee` but specifically `Engineers`, using the `join()` method or `any()` or `has()` operators will by default create a join from `companies` to `employees`, without including `engineers` or `managers` in the mix. If we wish to have criterion which is specifically against the `Engineer` class, we can tell those methods to join or subquery against the full set of tables representing the subclass using the `of_type()` operator:
     {python}
        session.query(Company).join(Company.employees.of_type(Engineer)).filter(Engineer.engineer_info=='someinfo')
-    
+
 A longhand notation, introduced in 0.4.3, is also available, which involves spelling out the full target selectable within a 2-tuple:
     {python}
        session.query(Company).join(('employees', employees.join(engineers))).filter(Engineer.engineer_info=='someinfo')
-    
+
 The second notation allows more flexibility, such as joining to any group of subclass tables:
     {python}
@@ -397,13 +396,13 @@ The `any()` and `has()` operators also can be used with `of_type()` when the emb
     {python}
        session.query(Company).filter(Company.employees.of_type(Engineer).any(Engineer.engineer_info=='someinfo')).all()
-    
+
 Note that `any()` and `has()` are both shorthand for a correlated EXISTS query. Building one by hand looks like:
     {python}
        session.query(Company).filter(
-            exists([1], 
-                and_(Engineer.engineer_info=='someinfo', employees.c.company_id==companies.c.company_id), 
+            exists([1],
+                and_(Engineer.engineer_info=='someinfo', employees.c.company_id==companies.c.company_id),
                from_obj=employees.join(engineers)
            )
        ).all()
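The "group of subclass tables" case mentioned above (whose example falls between hunks here) might look like this sketch:

    {python}
        session.query(Company).\
            join(('employees', employees.outerjoin(engineers).outerjoin(managers))).\
            filter(or_(Engineer.engineer_info=='someinfo',
                       Manager.manager_data=='somedata'))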
@@ -417,23 +416,23 @@ When loading fresh from the database, the joined-table setup above will query fr
     {python}
        session.query(Employee).all()
    {opensql}
-    SELECT employees.employee_id AS employees_employee_id, employees.name AS employees_name, employees.type AS employees_type 
+    SELECT employees.employee_id AS employees_employee_id, employees.name AS employees_name, employees.type AS employees_type
    FROM employees ORDER BY employees.oid
    []
-    SELECT managers.employee_id AS managers_employee_id, managers.manager_data AS managers_manager_data 
-    FROM managers 
+    SELECT managers.employee_id AS managers_employee_id, managers.manager_data AS managers_manager_data
+    FROM managers
    WHERE ? = managers.employee_id
    [5]
-    SELECT engineers.employee_id AS engineers_employee_id, engineers.engineer_info AS engineers_engineer_info 
-    FROM engineers 
+    SELECT engineers.employee_id AS engineers_employee_id, engineers.engineer_info AS engineers_engineer_info
+    FROM engineers
    WHERE ? = engineers.employee_id
    [2]
-    SELECT engineers.employee_id AS engineers_employee_id, engineers.engineer_info AS engineers_engineer_info 
-    FROM engineers 
+    SELECT engineers.employee_id AS engineers_employee_id, engineers.engineer_info AS engineers_engineer_info
+    FROM engineers
    WHERE ? = engineers.employee_id
    [4]
-    SELECT managers.employee_id AS managers_employee_id, managers.manager_data AS managers_manager_data 
-    FROM managers 
+    SELECT managers.employee_id AS managers_employee_id, managers.manager_data AS managers_manager_data
+    FROM managers
    WHERE ? = managers.employee_id
    [1]
@@ -453,15 +452,15 @@ More commonly, an all-at-once load may be achieved by constructing a query which
     {python}
        query = session.query(Employee).with_polymorphic([Engineer, Manager])
-    
+
 Which produces a query like the following:
     {python}
        query.all()
    {opensql}
-    SELECT employees.employee_id AS employees_employee_id, engineers.employee_id AS engineers_employee_id, managers.employee_id AS managers_employee_id, employees.name AS employees_name, employees.type AS employees_type, engineers.engineer_info AS engineers_engineer_info, managers.manager_data AS managers_manager_data 
+    SELECT employees.employee_id AS employees_employee_id, engineers.employee_id AS engineers_employee_id, managers.employee_id AS managers_employee_id, employees.name AS employees_name, employees.type AS employees_type, engineers.engineer_info AS engineers_engineer_info, managers.manager_data AS managers_manager_data
    FROM employees LEFT OUTER JOIN engineers ON employees.employee_id = engineers.employee_id LEFT OUTER JOIN managers ON employees.employee_id = managers.employee_id ORDER BY employees.oid
-    []    
+    []
 `with_polymorphic()` accepts a single class or mapper, a list of classes/mappers, or the string `'*'` to indicate all subclasses. It also accepts a second argument `selectable` which replaces the automatic join creation and instead selects directly from the selectable given. This can allow polymorphic loads from a variety of inheritance schemes including concrete tables, if the appropriate unions are constructed.
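A sketch of that `selectable` argument (illustration only; any equivalent join or union would serve):

    {python}
        # select from an explicit join rather than the automatically created one
        emp_join = employees.outerjoin(engineers).outerjoin(managers)
        query = session.query(Employee).with_polymorphic([Engineer, Manager], emp_join)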
@@ -484,14 +483,14 @@ When `select_table` is used, `with_polymorphic()` still overrides its usage at t
 Single table inheritance is where the attributes of the base class as well as all subclasses are represented within a single table. A column is present in the table for every attribute mapped to the base class and all subclasses; the columns which correspond to a single subclass are nullable. This configuration looks much like joined-table inheritance except there's only one table. In this case, a `type` column is required, as there would be no other way to discriminate between classes. The table is specified in the base mapper only; for the inheriting classes, leave their `table` parameter blank:
     {python}
-        employees_table = Table('employees', metadata, 
+        employees_table = Table('employees', metadata,
           Column('employee_id', Integer, primary_key=True),
           Column('name', String(50)),
           Column('manager_data', String(50)),
           Column('engineer_info', String(50)),
           Column('type', String(20), nullable=False)
        )
-    
+
        employee_mapper = mapper(Employee, employees_table, \
           polymorphic_on=employees_table.c.type, polymorphic_identity='employee')
        manager_mapper = mapper(Manager, inherits=employee_mapper, polymorphic_identity='manager')
@@ -504,18 +503,18 @@ Note that the mappers for the derived classes Manager and Engineer omit the spec
 This form of inheritance maps each class to a distinct table, as below:
     {python}
-        employees_table = Table('employees', metadata, 
+        employees_table = Table('employees', metadata,
           Column('employee_id', Integer, primary_key=True),
           Column('name', String(50)),
        )
-        managers_table = Table('managers', metadata, 
+        managers_table = Table('managers', metadata,
           Column('employee_id', Integer, primary_key=True),
           Column('name', String(50)),
           Column('manager_data', String(50)),
        )
-        engineers_table = Table('engineers', metadata, 
+        engineers_table = Table('engineers', metadata,
           Column('employee_id', Integer, primary_key=True),
           Column('name', String(50)),
           Column('engineer_info', String(50)),
@@ -532,9 +531,9 @@ To load polymorphically, the `select_table` argument is currently required. In
     {python}
        pjoin = polymorphic_union({
-            'employee':employees_table,
-            'manager':managers_table,
-            'engineer':engineers_table
+            'employee': employees_table,
+            'manager': managers_table,
+            'engineer': engineers_table
        }, 'type', 'pjoin')
        employee_mapper = mapper(Employee, employees_table, select_table=pjoin, \
@@ -549,19 +548,19 @@ Upon select, the polymorphic union produces a query like this:
     {python}
        session.query(Employee).all()
    {opensql}
-    SELECT pjoin.type AS pjoin_type, pjoin.manager_data AS pjoin_manager_data, pjoin.employee_id AS pjoin_employee_id, 
-    pjoin.name AS pjoin_name, pjoin.engineer_info AS pjoin_engineer_info 
+    SELECT pjoin.type AS pjoin_type, pjoin.manager_data AS pjoin_manager_data, pjoin.employee_id AS pjoin_employee_id,
+    pjoin.name AS pjoin_name, pjoin.engineer_info AS pjoin_engineer_info
    FROM (
-        SELECT employees.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, employees.name AS name, 
-        CAST(NULL AS VARCHAR(50)) AS engineer_info, 'employee' AS type 
-        FROM employees 
-        UNION ALL 
-        SELECT managers.employee_id AS employee_id, managers.manager_data AS manager_data, managers.name AS name, 
-        CAST(NULL AS VARCHAR(50)) AS engineer_info, 'manager' AS type 
-        FROM managers 
-        UNION ALL 
-        SELECT engineers.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, engineers.name AS name, 
-        engineers.engineer_info AS engineer_info, 'engineer' AS type 
+        SELECT employees.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, employees.name AS name,
+        CAST(NULL AS VARCHAR(50)) AS engineer_info, 'employee' AS type
+        FROM employees
+        UNION ALL
+        SELECT managers.employee_id AS employee_id, managers.manager_data AS manager_data, managers.name AS name,
+        CAST(NULL AS VARCHAR(50)) AS engineer_info, 'manager' AS type
+        FROM managers
+        UNION ALL
+        SELECT engineers.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, engineers.name AS name,
+        engineers.engineer_info AS engineer_info, 'engineer' AS type
    FROM engineers
    ) AS pjoin ORDER BY pjoin.oid
    []
@@ -571,46 +570,46 @@ Upon select, the polymorphic union produces a query like this:
 Both joined-table and single table inheritance scenarios produce mappings which are usable in relation() functions; that is, it's possible to map a parent object to a child object which is polymorphic. Similarly, inheriting mappers can have `relation()`s of their own at any level, which are inherited to each child class. The only requirement for relations is that there is a table relationship between parent and child. An example is the following modification to the joined table inheritance example, which sets a bi-directional relationship between `Employee` and `Company`:
     {python}
-        employees_table = Table('employees', metadata, 
+        employees_table = Table('employees', metadata,
           Column('employee_id', Integer, primary_key=True),
           Column('name', String(50)),
           Column('company_id', Integer, ForeignKey('companies.company_id'))
        )
-        companies = Table('companies', metadata, 
+        companies = Table('companies', metadata,
           Column('company_id', Integer, primary_key=True),
           Column('name', String(50)))
        class Company(object):
            pass
-        
+
        mapper(Company, companies, properties={
           'employees': relation(Employee, backref='company')
        })
-    
+
 SQLAlchemy has a lot of experience in this area; the optimized "outer join" approach can be used freely for parent and child relationships, eager loads are fully usable, query aliasing and other tricks are fully supported as well.
 In a concrete inheritance scenario, mapping `relation()`s is more difficult since the distinct classes do not share a table. In this case, you *can* establish a relationship from parent to child if a join condition can be constructed from parent to child, if each child table contains a foreign key to the parent:
     {python}
-        companies = Table('companies', metadata, 
+        companies = Table('companies', metadata,
           Column('id', Integer, primary_key=True),
           Column('name', String(50)))
-        employees_table = Table('employees', metadata, 
+        employees_table = Table('employees', metadata,
           Column('employee_id', Integer, primary_key=True),
           Column('name', String(50)),
           Column('company_id', Integer, ForeignKey('companies.id'))
        )
-        managers_table = Table('managers', metadata, 
+        managers_table = Table('managers', metadata,
           Column('employee_id', Integer, primary_key=True),
           Column('name', String(50)),
           Column('manager_data', String(50)),
           Column('company_id', Integer, ForeignKey('companies.id'))
        )
-        engineers_table = Table('engineers', metadata, 
+        engineers_table = Table('engineers', metadata,
           Column('employee_id', Integer, primary_key=True),
           Column('name', String(50)),
           Column('engineer_info', String(50)),
@@ -621,7 +620,7 @@ In a concrete inheritance scenario, mapping `relation()`s is more difficult sinc
    mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager')
    mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer')
    mapper(Company, companies, properties={
-        'employees':relation(Employee)
+        'employees': relation(Employee)
    })
 Let's crank it up and try loading with an eager load:
@@ -629,15 +628,15 @@ Let's crank it up and try loading with an eager load:
     {python}
        session.query(Company).options(eagerload('employees')).all()
    {opensql}
-    SELECT anon_1.type AS anon_1_type, anon_1.manager_data AS anon_1_manager_data, anon_1.engineer_info AS anon_1_engineer_info, 
-    anon_1.employee_id AS anon_1_employee_id, anon_1.name AS anon_1_name, anon_1.company_id AS anon_1_company_id, 
-    companies.id AS companies_id, companies.name AS companies_name 
-    FROM companies LEFT OUTER JOIN (SELECT CAST(NULL AS VARCHAR(50)) AS engineer_info, employees.employee_id AS employee_id, 
-    CAST(NULL AS VARCHAR(50)) AS manager_data, employees.name AS name, employees.company_id AS company_id, 'employee' AS type 
-    FROM employees UNION ALL SELECT CAST(NULL AS VARCHAR(50)) AS engineer_info, managers.employee_id AS employee_id, 
-    managers.manager_data AS manager_data, managers.name AS name, managers.company_id AS company_id, 'manager' AS type 
-    FROM managers UNION ALL SELECT engineers.engineer_info AS engineer_info, engineers.employee_id AS employee_id, 
-    CAST(NULL AS VARCHAR(50)) AS manager_data, engineers.name AS name, engineers.company_id AS company_id, 'engineer' AS type 
+    SELECT anon_1.type AS anon_1_type, anon_1.manager_data AS anon_1_manager_data, anon_1.engineer_info AS anon_1_engineer_info,
+    anon_1.employee_id AS anon_1_employee_id, anon_1.name AS anon_1_name, anon_1.company_id AS anon_1_company_id,
+    companies.id AS companies_id, companies.name AS companies_name
+    FROM companies LEFT OUTER JOIN (SELECT CAST(NULL AS VARCHAR(50)) AS engineer_info, employees.employee_id AS employee_id,
+    CAST(NULL AS VARCHAR(50)) AS manager_data, employees.name AS name, employees.company_id AS company_id, 'employee' AS type
+    FROM employees UNION ALL SELECT CAST(NULL AS VARCHAR(50)) AS engineer_info, managers.employee_id AS employee_id,
+    managers.manager_data AS manager_data, managers.name AS name, managers.company_id AS company_id, 'manager' AS type
+    FROM managers UNION ALL SELECT engineers.engineer_info AS engineer_info, engineers.employee_id AS employee_id,
+    CAST(NULL AS VARCHAR(50)) AS manager_data, engineers.name AS name, engineers.company_id AS company_id, 'engineer' AS type
    FROM engineers) AS anon_1 ON companies.id = anon_1.company_id ORDER BY companies.oid, anon_1.oid
    []
@@ -654,30 +653,30 @@ Mappers can be constructed against arbitrary relational units (called `Selectabl
     {python}
        # define a Join
        j = join(users_table, addresses_table)
-        
-        # map to it - the identity of an AddressUser object will be 
+
+        # map to it - the identity of an AddressUser object will be
        # based on (user_id, address_id) since those are the primary keys involved
        mapper(AddressUser, j, properties={
-            'user_id':[users_table.c.user_id, addresses_table.c.user_id]
+            'user_id': [users_table.c.user_id, addresses_table.c.user_id]
        })
 A second example:
     {python}
        # many-to-many join on an association table
-        j = join(users_table, userkeywords, 
-                users_table.c.user_id==userkeywords.c.user_id).join(keywords, 
+        j = join(users_table, userkeywords,
+                users_table.c.user_id==userkeywords.c.user_id).join(keywords,
                   userkeywords.c.keyword_id==keywords.c.keyword_id)
-        
-        # a class 
+
+        # a class
        class KeywordUser(object):
            pass
        # map to it - the identity of a KeywordUser object will be
        # (user_id, keyword_id) since those are the primary keys involved
        mapper(KeywordUser, j, properties={
-            'user_id':[users_table.c.user_id, userkeywords.c.user_id],
-            'keyword_id':[userkeywords.c.keyword_id, keywords.c.keyword_id]
+            'user_id': [users_table.c.user_id, userkeywords.c.user_id],
+            'keyword_id': [userkeywords.c.keyword_id, keywords.c.keyword_id]
        })
 In both examples above, "composite" columns were added as properties to the mappers; these are aggregations of multiple columns into one mapper property, which instructs the mapper to keep both of those columns set at the same value.
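Querying the join-mapped class might look like this sketch (the `user_name` and `email_address` column names are assumptions drawn from the surrounding examples):

    {python}
        # one AddressUser per (user, address) row of the join
        for au in session.query(AddressUser).filter(AddressUser.user_name=='jack'):
            print au.user_id, au.email_address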
@@ -687,17 +686,17 @@ In both examples above, "composite" columns were added as properties to the mapp
 Similar to mapping against a join, a plain select() object can be used with a mapper as well. Below, an example select which contains two aggregate functions and a group_by is mapped to a class:
     {python}
-        s = select([customers, 
-                    func.count(orders).label('order_count'), 
+        s = select([customers,
+                    func.count(orders).label('order_count'),
                    func.max(orders.price).label('highest_order')],
                   customers.c.customer_id==orders.c.customer_id,
                   group_by=[c for c in customers.c]
                   ).alias('somealias')
        class Customer(object):
            pass
-        
+
        mapper(Customer, s)
-    
+
 Above, the "customers" table is joined against the "orders" table to produce a full row for each customer row, the total count of related rows in the "orders" table, and the highest price in the "orders" table, grouped against the full set of columns in the "customers" table. That query is then mapped against the Customer class. New instances of Customer will contain attributes for each column in the "customers" table as well as an "order_count" and "highest_order" attribute. Updates to the Customer object will only be reflected in the "customers" table and not the "orders" table. This is because the primary key columns of the "orders" table are not represented in this mapper and therefore the table is not affected by save or delete operations.
 #### Multiple Mappers for One Class {@name=multiple}
@@ -709,10 +708,10 @@ The first mapper created for a certain class is known as that class's "primary m
     {python}
        # primary mapper
        mapper(User, users_table)
-    
+
        # make a secondary mapper to load User against a join
        othermapper = mapper(User, users_table.join(someothertable), non_primary=True)
-    
+
        # select
        result = session.query(othermapper).select()
@@ -817,12 +816,12 @@ A one to many relationship places a foreign key in the child table referencing t
    class Parent(object):
        pass
-    
+
    class Child(object):
        pass
-    
+
    mapper(Parent, parent_table, properties={
-        'children':relation(Child)
+        'children': relation(Child)
    })
    mapper(Child, child_table)
@@ -831,7 +830,7 @@ To establish a bi-directional relationship in one-to-many, where the "reverse" s
     {python}
    mapper(Parent, parent_table, properties={
-        'children':relation(Child, backref='parent')
+        'children': relation(Child, backref='parent')
    })
    mapper(Child, child_table)
@@ -850,7 +849,7 @@ Many to one places a foreign key in the parent table referencing the child. The
    child_table = Table('child', metadata,
        Column('id', Integer, primary_key=True),
    )
-    
+
    class Parent(object):
        pass
@@ -858,9 +857,9 @@ Many to one places a foreign key in the parent table referencing the child. The
        pass
    mapper(Parent, parent_table, properties={
-        'child':relation(Child)
+        'child': relation(Child)
    })
-    
+
    mapper(Child, child_table)
 Backref behavior is available here as well, where `backref="parents"` will place a one-to-many collection on the `Child` class.
@@ -871,14 +870,14 @@ One To One is essentially a bi-directional relationship with a scalar attribute
     {python}
    mapper(Parent, parent_table, properties={
-        'child':relation(Child, uselist=False, backref='parent')
+        'child': relation(Child, uselist=False, backref='parent')
    })
 Or to turn many-to-one into one-to-one:
     {python}
    mapper(Parent, parent_table, properties={
-        'child':relation(Child, backref=backref('parent', uselist=False))
+        'child': relation(Child, backref=backref('parent', uselist=False))
    })
 ##### Many To Many {@name=manytomany}
@@ -891,28 +890,28 @@ Many to Many adds an association table between two classes. The association tab
    right_table = Table('right', metadata,
        Column('id', Integer, primary_key=True))
-    
+
    association_table = Table('association', metadata,
        Column('left_id', Integer, ForeignKey('left.id')),
        Column('right_id', Integer, ForeignKey('right.id')),
    )
-    
+
    mapper(Parent, left_table, properties={
-        'children':relation(Child, secondary=association_table)
+        'children': relation(Child, secondary=association_table)
    })
-    
+
    mapper(Child, right_table)
 For a bi-directional relationship, both sides of the relation contain a collection by default, which can be modified on either side via the `uselist` flag to be scalar. The `backref` keyword will automatically use the same `secondary` argument for the reverse relation:
     {python}
    mapper(Parent, left_table, properties={
-        'children':relation(Child, secondary=association_table, backref='parents')
+        'children': relation(Child, secondary=association_table, backref='parents')
    })
-    
+
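Usage of the many-to-many mapping might look like this sketch (a configured `session` is assumed; the row in the association table is maintained automatically):

    {python}
        p = Parent()
        p.children.append(Child())
        session.save(p)
        session.flush()    # INSERTs into 'left', 'right' and 'association'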
 ##### Association Object
-The association object pattern is a variant on many-to-many: it specifically is used when your association table contains additional columns beyond those which are foreign keys to the left and right tables. Instead of using the `secondary` argument, you map a new class directly to the association table. The left side of the relation references the association object via one-to-many, and the association class references the right side via many-to-one. 
+The association object pattern is a variant on many-to-many: it specifically is used when your association table contains additional columns beyond those which are foreign keys to the left and right tables. Instead of using the `secondary` argument, you map a new class directly to the association table. The left side of the relation references the association object via one-to-many, and the association class references the right side via many-to-one.
     {python}
    left_table = Table('left', metadata,
@@ -920,21 +919,21 @@ The association object pattern is a variant on many-to-many: it specifically is
    right_table = Table('right', metadata,
        Column('id', Integer, primary_key=True))
-    
+
    association_table = Table('association', metadata,
        Column('left_id', Integer, ForeignKey('left.id'), primary_key=True),
        Column('right_id', Integer, ForeignKey('right.id'), primary_key=True),
        Column('data', String(50))
    )
-    
+
    mapper(Parent, left_table, properties={
        'children':relation(Association)
    })
-    
+
    mapper(Association, association_table, properties={
        'child':relation(Child)
    })
-    
+
    mapper(Child, right_table)
 The bi-directional version adds backrefs to both relations:
@@ -958,8 +957,8 @@ Working with the association pattern in its direct form requires that child obje
    a = Association()
    a.child = Child()
    p.children.append(a)
-    
-    # iterate through child objects via association, including association 
+
+    # iterate through child objects via association, including association
    # attributes
    for assoc in p.children:
        print assoc.data
@@ -988,7 +987,7 @@ A graph such as the following:
    root --+---> child1
           +---> child2 --+--> subchild1
           |              +--> subchild2
-           +---> child3   
+           +---> child3
 Would be represented with data such as:
@@ -1001,30 +1000,30 @@ Would be represented with data such as:
    4       3          subchild1
    5       3          subchild2
    6       1          child3
-    
-SQLAlchemy's `mapper()` configuration for a self-referential one-to-many relationship is exactly like a "normal" one-to-many relationship. When SQLAlchemy encounters the foreign key relation from `treenodes` to `treenodes`, it assumes one-to-many unless told otherwise: 
-    
+
+SQLAlchemy's `mapper()` configuration for a self-referential one-to-many relationship is exactly like a "normal" one-to-many relationship. When SQLAlchemy encounters the foreign key relation from `treenodes` to `treenodes`, it assumes one-to-many unless told otherwise:
+
     {python}
    # entity class
    class Node(object):
        pass
    mapper(Node, nodes, properties={
-        'children':relation(Node)
+        'children': relation(Node)
    })
-    
+
 To create a many-to-one relationship from child to parent, an extra indicator of the "remote side" is added, which contains the `Column` object or objects indicating the remote side of the relation:
     {python}
    mapper(Node, nodes, properties={
-        'parent':relation(Node, remote_side=[nodes.c.id])
+        'parent': relation(Node, remote_side=[nodes.c.id])
    })
-    
+
 And the bi-directional version combines both:
     {python}
    mapper(Node, nodes, properties={
-        'children':relation(Node, backref=backref('parent', remote_side=[nodes.c.id]))
+        'children': relation(Node, backref=backref('parent', remote_side=[nodes.c.id]))
    })
 There are several examples included with SQLAlchemy illustrating self-referential strategies; these include [basic_tree.py](http://www.sqlalchemy.org/trac/browser/sqlalchemy/trunk/examples/adjacencytree/basic_tree.py) and [optimized_al.py](http://www.sqlalchemy.org/trac/browser/sqlalchemy/trunk/examples/elementtree/optimized_al.py), the latter of which illustrates how to persist and search XML documents in conjunction with [ElementTree](http://effbot.org/zone/element-index.htm).
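With the bi-directional version above, building a small tree might look like this sketch (`session` setup assumed):

    {python}
        root, child = Node(), Node()
        root.children.append(child)
        assert child.parent is root    # the backref keeps the scalar side in sync
        session.save(root)             # children are saved via the default cascade
        session.flush()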
@@ -1044,8 +1043,8 @@ On the subject of joins, i.e. those described in [datamapping_joins](rel:datamap
    nodealias = nodes.alias()
    {sql}sess.query(Node).filter(Node.data=='subchild1').\
        filter(and_(Node.parent_id==nodealias.c.id, nodealias.c.data=='child2')).all()
-    SELECT treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data 
-    FROM treenodes, treenodes AS treenodes_1 
+    SELECT treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data
+    FROM treenodes, treenodes AS treenodes_1
    WHERE treenodes.data = ? AND treenodes.parent_id = treenodes_1.id AND treenodes_1.data = ? ORDER BY treenodes.oid
    ['subchild1', 'child2']
@@ -1055,8 +1054,8 @@ or automatically, using `join()` with `aliased=True`:
    # get all nodes named 'subchild1' with a parent named 'child2'
    {sql}sess.query(Node).filter(Node.data=='subchild1').\
        join('parent', aliased=True).filter(Node.data=='child2').all()
-    SELECT treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data 
-    FROM treenodes JOIN treenodes AS treenodes_1 ON treenodes_1.id = treenodes.parent_id 
+    SELECT treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data
+    FROM treenodes JOIN treenodes AS treenodes_1 ON treenodes_1.id = treenodes.parent_id
    WHERE treenodes.data = ? AND treenodes_1.data = ? ORDER BY treenodes.oid
    ['subchild1', 'child2']
@@ -1067,8 +1066,8 @@ To add criterion to multiple points along a longer join, use `from_joinpoint=Tru
    {sql}sess.query(Node).filter(Node.data=='subchild1').\
        join('parent', aliased=True).filter(Node.data=='child2').\
        join('parent', aliased=True, from_joinpoint=True).filter(Node.data=='root').all()
-    SELECT treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data 
-    FROM treenodes JOIN treenodes AS treenodes_1 ON treenodes_1.id = treenodes.parent_id JOIN treenodes AS treenodes_2 ON treenodes_2.id = treenodes_1.parent_id 
+    SELECT treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data
+    FROM treenodes JOIN treenodes AS treenodes_1 ON treenodes_1.id = treenodes.parent_id JOIN treenodes AS treenodes_2 ON treenodes_2.id = treenodes_1.parent_id
    WHERE treenodes.data = ? AND treenodes_1.data = ? AND treenodes_2.data = ? ORDER BY treenodes.oid
    ['subchild1', 'child2', 'root']
@@ -1078,11 +1077,11 @@ Eager loading of relations occurs using joins or outerjoins from parent to child
     {python}
    mapper(Node, nodes, properties={
-        'children':relation(Node, lazy=False, join_depth=2)
+        'children': relation(Node, lazy=False, join_depth=2)
    })
-    
+
    {sql}session.query(Node).all()
-    SELECT treenodes_1.id AS treenodes_1_id, treenodes_1.parent_id AS treenodes_1_parent_id, treenodes_1.data AS treenodes_1_data, treenodes_2.id AS treenodes_2_id, treenodes_2.parent_id AS treenodes_2_parent_id, treenodes_2.data AS treenodes_2_data, treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data 
+    SELECT treenodes_1.id AS treenodes_1_id, treenodes_1.parent_id AS treenodes_1_parent_id, treenodes_1.data AS treenodes_1_data, treenodes_2.id AS treenodes_2_id, treenodes_2.parent_id AS treenodes_2_parent_id, treenodes_2.data AS treenodes_2_data, treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data
    FROM treenodes LEFT OUTER JOIN treenodes AS treenodes_2 ON treenodes.id = treenodes_2.parent_id LEFT OUTER JOIN treenodes AS treenodes_1 ON treenodes_2.id = treenodes_1.parent_id ORDER BY treenodes.oid, treenodes_2.oid, treenodes_1.oid
    []
@@ -1097,14 +1096,14 @@ In this example we create a relation `boston_addresses` which will only load the
        pass
    class Address(object):
        pass
-    
+
    mapper(Address, addresses_table)
    mapper(User, users_table, properties={
-        'boston_addresses' : relation(Address, primaryjoin=
-            and_(users_table.c.user_id==addresses_table.c.user_id, 
+        'boston_addresses': relation(Address, primaryjoin=
+            and_(users_table.c.user_id==addresses_table.c.user_id,
                addresses_table.c.city=='Boston'))
    })
-    
+
 Many to many relationships can be customized by one or both of `primaryjoin` and `secondaryjoin`, shown below with just the default many-to-many relationship explicitly set:
     {python}
@@ -1114,7 +1113,7 @@ Many to many relationships can be customized by one or both of `primaryjoin` and
        pass
    mapper(Keyword, keywords_table)
    mapper(User, users_table, properties={
-        'keywords':relation(Keyword, secondary=userkeywords_table,
+        'keywords': relation(Keyword, secondary=userkeywords_table,
            primaryjoin=users_table.c.user_id==userkeywords_table.c.user_id,
            secondaryjoin=userkeywords_table.c.keyword_id==keywords_table.c.keyword_id
        )
    })
@@ -1127,7 +1126,7 @@ When using `primaryjoin` and `secondaryjoin`, SQLAlchemy also needs to be aware
     {python}
    mapper(Address, addresses_table)
    mapper(User, users_table, properties={
-        'addresses' : relation(Address, primaryjoin=
+        'addresses': relation(Address, primaryjoin=
            users_table.c.user_id==addresses_table.c.user_id,
            foreign_keys=[addresses_table.c.user_id])
    })
@@ -1148,12 +1147,12 @@ Theres no restriction on how many times you can relate from parent to child. SQ
     {python}
    mapper(User, users_table, properties={
-        'boston_addresses' : relation(Address, primaryjoin=
-            and_(users_table.c.user_id==Address.c.user_id, 
-                Addresses.c.city=='Boston')),
-        'newyork_addresses' : relation(Address, primaryjoin=
-            and_(users_table.c.user_id==Address.c.user_id, 
-                Addresses.c.city=='New York')),
+        'boston_addresses': relation(Address, primaryjoin=
+            and_(users_table.c.user_id==addresses_table.c.user_id,
+                addresses_table.c.city=='Boston')),
+        'newyork_addresses': relation(Address, primaryjoin=
+            and_(users_table.c.user_id==addresses_table.c.user_id,
+                addresses_table.c.city=='New York')),
    })
 #### Alternate Collection Implementations {@name=collections}
@@ -1275,7 +1274,7 @@ A `dict` can be used as a collection, but a keying strategy is needed to map ent
    mapper(Item, items_table, properties={
        # key by column
        'notes': relation(Note, collection_class=column_mapped_collection(notes_table.c.keyword)),
-        # or named attribute 
+        # or named attribute
        'notes2': relation(Note, collection_class=attribute_mapped_collection('keyword')),
        # or any callable
        'notes3': relation(Note, collection_class=mapped_collection(lambda entity: entity.a + entity.b))
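Usage of the keyed collections might look like this sketch (the `Note` constructor shown is hypothetical):

    {python}
        item = Item()
        # with attribute_mapped_collection('keyword'), notes are keyed by that attribute
        item.notes2['color'] = Note('color', 'blue')
        print item.notes2.keys()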
@@ -1325,19 +1324,19 @@ In the [datamapping](rel:datamapping), we introduced the concept of **Eager Load
     {python}
    {sql}>>> jack = session.query(User).options(eagerload('addresses')).filter_by(name='jack').all() #doctest: +NORMALIZE_WHITESPACE
-    SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, 
-    addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name, 
-    users.fullname AS users_fullname, users.password AS users_password 
-    FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id 
+    SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address,
+    addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name,
+    users.fullname AS users_fullname, users.password AS users_password
+    FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id
    WHERE users.name = ? ORDER BY users.oid, addresses_1.oid
    ['jack']
-    
+
 By default, all relations are **lazy loading**. The scalar or collection attribute associated with a `relation()` contains a trigger which fires the first time the attribute is accessed, which issues a SQL call at that point:
     {python}
    {sql}>>> jack.addresses
-    SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id 
-    FROM addresses 
+    SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id
+    FROM addresses
    WHERE ? = addresses.user_id ORDER BY addresses.oid
    [5]
    {stop}[<Address(u'jack@google.com')>, <Address(u'j25@yahoo.com')>]
@@ -1347,7 +1346,7 @@ The default **loader strategy** for any `relation()` is configured by the `lazy`
     {python}
    # eager load 'children' attribute
    mapper(Parent, parent_table, properties={
-        'children':relation(Child, lazy=False)
+        'children': relation(Child, lazy=False)
    })
 The loader strategy can be changed from lazy to eager as well as eager to lazy using the `eagerload()` and `lazyload()` query options:
@@ -1363,19 +1362,19 @@ To reference a relation that is deeper than one level, separate the names by per
     {python}
    session.query(Parent).options(eagerload('foo.bar.bat')).all()
-    
+
 When using dot-separated names with `eagerload()`, the option applies **only** to the actual attribute named, and **not** its ancestors. For example, suppose a mapping from `A` to `B` to `C`, where the relations, named `atob` and `btoc`, are both lazy-loading. A statement like the following:
     {python}
    session.query(A).options(eagerload('atob.btoc')).all()
-    
-will load only `A` objects to start. When the `atob` attribute on each `A` is accessed, the returned `B` objects will *eagerly* load their `C` objects. 
+
+will load only `A` objects to start. When the `atob` attribute on each `A` is accessed, the returned `B` objects will *eagerly* load their `C` objects.
 Therefore, to modify the eager load to load both `atob` as well as `btoc`, place eagerloads for both:
     {python}
    session.query(A).options(eagerload('atob'), eagerload('atob.btoc')).all()
-    
+
 or more simply just use `eagerload_all()`:
     {python}
@@ -1392,15 +1391,15 @@ The single option `Query` provides to control this is the `contains_eager()` opt
     {python}
    # mapping is the users->addresses mapping
    mapper(User, users_table, properties={
-        'addresses':relation(Address, addresses_table)
+        'addresses': relation(Address, addresses_table)
    })
-    
+
    # define a query on USERS with an outer join to ADDRESSES
    statement = users_table.outerjoin(addresses_table).select().apply_labels()
-    
-    # construct a Query object which expects the "addresses" results 
+
+    # construct a Query object which expects the "addresses" results
    query = session.query(User).options(contains_eager('addresses'))
-    
+
    # get results normally
    r = query.from_statement(statement)
@@ -1414,13 +1413,13 @@ If the "eager" portion of the statement is "aliased", the `alias` keyword argume
     {python}
    # use an alias of the Address entity
    adalias = aliased(Address)
-    
-    # construct a Query object which expects the "addresses" results 
+
+    # construct a Query object which expects the "addresses" results
    query = session.query(User).outerjoin((adalias, User.addresses)).options(contains_eager(User.addresses, alias=adalias))
    # get results normally
    {sql}r = query.all()
-    SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, adalias.address_id AS adalias_address_id, 
+    SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, adalias.address_id AS adalias_address_id,
    adalias.user_id AS adalias_user_id, adalias.email_address AS adalias_email_address, (...other columns...)
    FROM users LEFT OUTER JOIN email_addresses AS email_addresses_1 ON users.user_id = email_addresses_1.user_id
@@ -1428,12 +1427,12 @@ The path given as the argument to `contains_eager()` needs to be a full path fro
     {python}
    query(User).options(contains_eager('orders', 'items'))
-    
+
 The descriptor version looks like:
     {python}
    query(User).options(contains_eager(User.orders, Order.items))
-    
+
 A variant on `contains_eager()` is the `contains_alias()` option, which is used in the rare case that the parent object is loaded from an alias within a user-defined SELECT statement:
     {python}
@@ -1442,11 +1441,11 @@ A variant on `contains_eager()` is the `contains_alias()` option, which is used
    # add on an eager load of "addresses"
    statement = statement.outerjoin(addresses).select().apply_labels()
-    
+
    # create query, indicating "ulist" is an alias for the main table, "addresses" property should
    # be eager loaded
    query = session.query(User).options(contains_alias('ulist'), contains_eager('addresses'))
-    
+
    # results
    r = query.from_statement(statement)
@@ -1460,32 +1459,32 @@ The most useful by far is the `dynamic_loader()` relation. This is a variant of
     {python}
    mapper(User, users_table, properties={
-        'posts':dynamic_loader(Post)
+        'posts': dynamic_loader(Post)
    })
-    
+
    jack = session.query(User).get(id)
-    
+
    # filter Jack's blog posts
-    posts = jack.posts.filter(Post.c.headline=='this is a post')
-    
+    posts = jack.posts.filter(Post.headline=='this is a post')
+
    # apply array slices
    posts = jack.posts[5:20]
-    
+
 The dynamic relation supports limited write operations, via the `append()` and `remove()` methods. Since the read side of the dynamic relation always queries the database, changes to the underlying collection will not be visible until the data has been flushed:
     {python}
-    oldpost = jack.posts.filter(Post.c.headline=='old post').one()
+    oldpost = jack.posts.filter(Post.headline=='old post').one()
    jack.posts.remove(oldpost)
-    
+
    jack.posts.append(Post('new post'))
-    
+
 To place a dynamic relation on a backref, use `lazy='dynamic'`:
     {python}
    mapper(Post, posts_table, properties={
-        'user':relation(User, backref=backref('posts', lazy='dynamic'))
+        'user': relation(User, backref=backref('posts', lazy='dynamic'))
    })
-    
+
 Note that eager/lazy loading options cannot be used in conjunction with dynamic relations at this time.
 ##### Setting Noload {@name=noload}
@@ -1493,8 +1492,8 @@ The opposite of the dynamic relation is simply "noload", specified using `lazy=N
     {python}
-    mapper(MyClass, table, properties=relation{
-        'children':relation(MyOtherClass, lazy=None)
+    mapper(MyClass, table, properties={
+        'children': relation(MyOtherClass, lazy=None)
    })
 Above, the `children` collection is fully writeable, and changes to it will be persisted to the database as well as locally available for reading at the time they are added. However when instances of `MyClass` are freshly loaded from the database, the `children` collection stays empty.
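A sketch of that noload behavior (illustration only; the `id` attribute and `session` setup are assumed):

    {python}
        obj = MyClass()
        obj.children.append(MyOtherClass())
        session.save(obj)
        session.flush()          # the append is persisted normally

        session.clear()
        obj = session.query(MyClass).get(obj.id)
        print obj.children      # freshly loaded: stays empty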
@@ -1511,17 +1510,17 @@ Use `passive_deletes=True` to disable child object loading on a DELETE operation
    myothertable = Table('myothertable', meta,
        Column('id', Integer, primary_key=True),
        Column('parent_id', Integer),
-        ForeignKeyConstraint(['parent_id'],['mytable.id'], ondelete="CASCADE"),
+        ForeignKeyConstraint(['parent_id'], ['mytable.id'], ondelete="CASCADE"),
    )
    mapper(MyOtherClass, myothertable)
-    
+
    mapper(MyClass, mytable, properties={
-        'children':relation(MyOtherClass, cascade="all, delete-orphan", passive_deletes=True)
+        'children': relation(MyOtherClass, cascade="all, delete-orphan", passive_deletes=True)
    })
 When `passive_deletes` is applied, the `children` relation will not be loaded into memory when an instance of `MyClass` is marked for deletion. The `cascade="all, delete-orphan"` *will* take effect for instances of `MyOtherClass` which are currently present in the session; however for instances of `MyOtherClass` which are not loaded, SQLAlchemy assumes that "ON DELETE CASCADE" rules will ensure that those rows are deleted by the database and that no foreign key violation will occur.
-    
+
 #### Mutable Primary Keys / Update Cascades {@name=mutablepks}
 As of SQLAlchemy 0.4.2, the primary key attributes of an instance can be changed freely, and will be persisted upon flush. When the primary key of an entity changes, related items which reference the primary key must also be updated as well. For databases which enforce referential integrity, it's required to use the database's ON UPDATE CASCADE functionality in order to propagate primary key changes. For those which don't, the `passive_updates` flag can be set to `False` which instructs SQLAlchemy to issue UPDATE statements individually. The `passive_updates` flag can also be `False` in conjunction with ON UPDATE CASCADE functionality, although in that case it issues UPDATE statements unnecessarily.
@@ -1532,20 +1531,20 @@ A typical mutable primary key setup might look like:
    users = Table('users', metadata,
        Column('username', String(50), primary_key=True),
        Column('fullname', String(100)))
-    
+
    addresses = Table('addresses', metadata,
        Column('email', String(50), primary_key=True),
        Column('username', String(50), ForeignKey('users.username', onupdate="cascade")))
-    
+
    class User(object):
        pass
    class Address(object):
        pass
    mapper(User, users, properties={
-        'addresses':relation(Address, passive_updates=False)
+        'addresses': relation(Address, passive_updates=False)
    })
    mapper(Address, addresses)
-    
+
 `passive_updates` is set to `True` by default. Foreign key references to non-primary key columns are supported as well.
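Given the mapping above, a primary key change might play out like this sketch (`session` setup assumed):

    {python}
        jack = session.query(User).get('jack')
        jack.username = 'ed'
        # with passive_updates=False, flush() UPDATEs the users row and then
        # issues individual UPDATE statements for each dependent addresses row
        session.flush()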