Dataset columns:
- max_stars_repo_path: string (length 3 to 269)
- max_stars_repo_name: string (length 4 to 119)
- max_stars_count: int64 (0 to 191k)
- id: string (length 1 to 7)
- content: string (length 6 to 1.05M)
- score: float64 (0.23 to 5.13)
- int_score: int64 (0 to 5)
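Each record pairs a source file (content) with its repository path and name, the repository's maximum star count, a string id, and a quality score together with its rounded integer counterpart (int_score). As a minimal sketch of how records with this schema might be consumed, the snippet below loads them with the Hugging Face datasets library and keeps the higher-scored files; the local file name "data.parquet" and the threshold of 3 are illustrative assumptions, not part of this listing.

```python
# Minimal sketch: read records with the columns listed above and keep the
# better-scored files. The path "data.parquet" and the int_score threshold
# are assumptions for illustration only.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data.parquet", split="train")
good = ds.filter(lambda row: row["int_score"] >= 3)

for row in good.select(range(min(3, len(good)))):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["score"])
    print(row["content"][:200])  # first 200 characters of the stored file
```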
Sample record 1:
max_stars_repo_path: ogs5py/fileclasses/mcp/core.py
max_stars_repo_name: MuellerSeb/ogs5py
max_stars_count: 3
id: 100
content:
    <gh_stars>1-10
    # -*- coding: utf-8 -*-
    """Class for the ogs COMPONENT_PROPERTIES file."""
    from ogs5py.fileclasses.base import BlockFile


    class MCP(BlockFile):
        """
        Class for the ogs COMPONENT_PROPERTIES file.

        Parameters
        ----------
        task_root : str, optional
            Path to the destiny model folder.
            Default: cwd+"ogs5model"
        task_id : str, optional
            Name for the ogs task.
            Default: "model"

        Notes
        -----
        Main-Keywords (#):
            - COMPONENT_PROPERTIES

        Sub-Keywords ($) per Main-Keyword:
            - COMPONENT_PROPERTIES

                - ACENTRIC_FACTOR
                - A_ZERO
                - BUBBLE_VELOCITY
                - CRITICAL_PRESSURE
                - CRITICAL_TEMPERATURE
                - DECAY
                - DIFFUSION
                - FLUID_ID
                - FLUID_PHASE
                - FORMULA
                - ISOTHERM
                - MAXIMUM_AQUEOUS_SOLUBILITY
                - MINERAL_DENSITY
                - MOBILE
                - MOLAR_DENSITY
                - MOLAR_VOLUME
                - MOLAR_WEIGHT
                - MOL_MASS
                - NAME
                - OutputMassOfComponentInModel
                - TRANSPORT_PHASE
                - VALENCE
                - VOLUME_DIFFUSION

        Standard block:
            None

        Keyword documentation:
            https://ogs5-keywords.netlify.com/ogs/wiki/public/doc-auto/by_ext/mcp

        Reading routines:
            https://github.com/ufz/ogs5/blob/master/FEM/rfmat_cp.cpp#L269

        See Also
        --------
        add_block
        """

        MKEYS = ["COMPONENT_PROPERTIES"]
        # sorted
        SKEYS = [
            [
                "NAME",
                "FORMULA",
                "MOBILE",
                "TRANSPORT_PHASE",
                "FLUID_PHASE",
                "MOL_MASS",
                "CRITICAL_PRESSURE",
                "CRITICAL_TEMPERATURE",
                "ACENTRIC_FACTOR",
                "FLUID_ID",
                "MOLAR_VOLUME",
                "VOLUME_DIFFUSION",
                "MINERAL_DENSITY",
                "DIFFUSION",
                "DECAY",
                "ISOTHERM",
                "BUBBLE_VELOCITY",
                "MOLAR_DENSITY",
                "MOLAR_WEIGHT",
                "MAXIMUM_AQUEOUS_SOLUBILITY",
                "OutputMassOfComponentInModel",
                "VALENCE",
                "A_ZERO",
                "CRITICAL_VOLUME",  # really?
                "CRITICAL_DENSITY",  # really?
                "COMP_CAPACITY",  # really?
                "COMP_CONDUCTIVITY",  # really?
                "SOLUTE",  # really?
                "MOLECULAR_WEIGHT",  # really?
            ]
        ]

        STD = {}

        def __init__(self, **OGS_Config):
            super().__init__(**OGS_Config)
            self.file_ext = ".mcp"

score: 1.945313
int_score: 2
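The MCP class in the record above only declares the valid main and sub-keywords; blocks are filled through BlockFile.add_block, which its docstring points to. The sketch below shows one plausible way to do that through ogs5py's OGS wrapper; the task_root, component name, and numeric values are illustrative placeholders rather than values taken from the dataset.

```python
# Minimal sketch, assuming ogs5py is installed. The task_root, component name
# and numeric values are illustrative placeholders only.
from ogs5py import OGS

model = OGS(task_root="tracer_model", task_id="model")
# The MCP file has a single main keyword (COMPONENT_PROPERTIES); add_block
# fills it with sub-keyword entries such as NAME, MOBILE and DIFFUSION.
model.mcp.add_block(
    NAME="Tracer",
    MOBILE=1,
    DIFFUSION=[1, 1.0e-9],
)
model.write_input()  # writes the .mcp file (and the other task files) to disk
```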
Sample record 2:
max_stars_repo_path: keystone/tests/unit/test_v3_assignment.py
max_stars_repo_name: crowdy/keystone
max_stars_count: 0
id: 101
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import random import uuid import freezegun import http.client from testtools import matchers from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.resource.backends import base as resource_base from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class SystemRoleAssignmentMixin(object): def _create_new_role(self): """Create a role available for use anywhere and return the ID.""" ref = unit.new_role_ref() response = self.post('/roles', body={'role': ref}) # We only really need the role ID, so omit the rest of the response and # return the ID of the role we just created. return response.json_body['role']['id'] def _create_group(self): body = { 'group': { 'domain_id': self.domain_id, 'name': uuid.uuid4().hex } } response = self.post('/groups/', body=body) return response.json_body['group'] def _create_user(self): body = { 'user': { 'domain_id': self.domain_id, 'name': uuid.uuid4().hex } } response = self.post('/users/', body=body) return response.json_body['user'] class AssignmentTestCase(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin, SystemRoleAssignmentMixin): """Test roles and role assignments.""" def setUp(self): super(AssignmentTestCase, self).setUp() self.group = unit.new_group_ref(domain_id=self.domain_id) self.group = PROVIDERS.identity_api.create_group(self.group) self.group_id = self.group['id'] # Role CRUD tests def test_create_role(self): """Call ``POST /roles``.""" ref = unit.new_role_ref() r = self.post( '/roles', body={'role': ref}) return self.assertValidRoleResponse(r, ref) def test_create_role_bad_request(self): """Call ``POST /roles``.""" self.post('/roles', body={'role': {}}, expected_status=http.client.BAD_REQUEST) def test_list_head_roles(self): """Call ``GET & HEAD /roles``.""" resource_url = '/roles' r = self.get(resource_url) self.assertValidRoleListResponse(r, ref=self.role, resource_url=resource_url) self.head(resource_url, expected_status=http.client.OK) def test_get_head_role(self): """Call ``GET & HEAD /roles/{role_id}``.""" resource_url = '/roles/%(role_id)s' % { 'role_id': self.role_id} r = self.get(resource_url) self.assertValidRoleResponse(r, self.role) self.head(resource_url, expected_status=http.client.OK) def test_update_role(self): """Call ``PATCH /roles/{role_id}``.""" ref = unit.new_role_ref() del ref['id'] r = self.patch('/roles/%(role_id)s' % { 'role_id': self.role_id}, body={'role': ref}) self.assertValidRoleResponse(r, ref) def test_delete_role(self): """Call ``DELETE /roles/{role_id}``.""" self.delete('/roles/%(role_id)s' % { 'role_id': self.role_id}) # Role Grants tests def test_crud_user_project_role_grants(self): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) collection_url = ( '/projects/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': self.project['id'], 'user_id': self.user['id']}) member_url = 
'%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': role['id']} # There is a role assignment for self.user on self.project r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, expected_length=1) self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role, resource_url=collection_url, expected_length=2) self.head(collection_url, expected_status=http.client.OK) self.delete(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, expected_length=1) self.assertIn(collection_url, r.result['links']['self']) self.head(collection_url, expected_status=http.client.OK) def test_crud_user_project_role_grants_no_user(self): """Grant role on a project to a user that doesn't exist. When grant a role on a project to a user that doesn't exist, the server returns Not Found for the user. """ user_id = uuid.uuid4().hex collection_url = ( '/projects/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': self.project['id'], 'user_id': user_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url, expected_status=http.client.NOT_FOUND) self.head(member_url, expected_status=http.client.NOT_FOUND) self.get(member_url, expected_status=http.client.NOT_FOUND) def test_crud_user_domain_role_grants(self): time = datetime.datetime.utcnow() with freezegun.freeze_time(time) as frozen_datetime: collection_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': self.domain_id, 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, resource_url=collection_url) self.head(collection_url, expected_status=http.client.OK) self.delete(member_url) # NOTE(lbragstad): Make sure we wait a second before we ask for the # roles. This ensures the token we use isn't considered revoked # because it was issued within the same second as a revocation # event. frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) r = self.get(collection_url) self.assertValidRoleListResponse(r, expected_length=0, resource_url=collection_url) self.head(collection_url, expected_status=http.client.OK) def test_crud_user_domain_role_grants_no_user(self): """Grant role on a domain to a user that doesn't exist. When grant a role on a domain to a user that doesn't exist, the server returns 404 Not Found for the user. 
""" user_id = uuid.uuid4().hex collection_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': self.domain_id, 'user_id': user_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url, expected_status=http.client.NOT_FOUND) self.head(member_url, expected_status=http.client.NOT_FOUND) self.get(member_url, expected_status=http.client.NOT_FOUND) def test_crud_group_project_role_grants(self): time = datetime.datetime.utcnow() with freezegun.freeze_time(time) as frozen_datetime: collection_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles' % { 'project_id': self.project_id, 'group_id': self.group_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, resource_url=collection_url) self.head(collection_url, expected_status=http.client.OK) self.delete(member_url) # NOTE(lbragstad): Make sure we wait a second before we ask for the # roles. This ensures the token we use isn't considered revoked # because it was issued within the same second as a revocation # event. frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) r = self.get(collection_url) self.assertValidRoleListResponse(r, expected_length=0, resource_url=collection_url) self.head(collection_url, expected_status=http.client.OK) def test_crud_group_project_role_grants_no_group(self): """Grant role on a project to a group that doesn't exist. When grant a role on a project to a group that doesn't exist, the server returns 404 Not Found for the group. """ group_id = uuid.uuid4().hex collection_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles' % { 'project_id': self.project_id, 'group_id': group_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url, expected_status=http.client.NOT_FOUND) self.head(member_url, expected_status=http.client.NOT_FOUND) self.get(member_url, expected_status=http.client.NOT_FOUND) def test_crud_group_domain_role_grants(self): time = datetime.datetime.utcnow() with freezegun.freeze_time(time) as frozen_datetime: collection_url = ( '/domains/%(domain_id)s/groups/%(group_id)s/roles' % { 'domain_id': self.domain_id, 'group_id': self.group_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, resource_url=collection_url) self.head(collection_url, expected_status=http.client.OK) self.delete(member_url) # NOTE(lbragstad): Make sure we wait a second before we ask for the # roles. This ensures the token we use isn't considered revoked # because it was issued within the same second as a revocation # event. frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) r = self.get(collection_url) self.assertValidRoleListResponse(r, expected_length=0, resource_url=collection_url) self.head(collection_url, expected_status=http.client.OK) def test_crud_group_domain_role_grants_no_group(self): """Grant role on a domain to a group that doesn't exist. When grant a role on a domain to a group that doesn't exist, the server returns 404 Not Found for the group. 
""" group_id = uuid.uuid4().hex collection_url = ( '/domains/%(domain_id)s/groups/%(group_id)s/roles' % { 'domain_id': self.domain_id, 'group_id': group_id}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} self.put(member_url, expected_status=http.client.NOT_FOUND) self.head(member_url, expected_status=http.client.NOT_FOUND) self.get(member_url, expected_status=http.client.NOT_FOUND) def _create_new_user_and_assign_role_on_project(self): """Create a new user and assign user a role on a project.""" # Create a new user new_user = unit.new_user_ref(domain_id=self.domain_id) user_ref = PROVIDERS.identity_api.create_user(new_user) # Assign the user a role on the project collection_url = ( '/projects/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': self.project_id, 'user_id': user_ref['id']}) member_url = ('%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id}) self.put(member_url) # Check the user has the role assigned self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) return member_url, user_ref def test_delete_user_before_removing_role_assignment_succeeds(self): """Call ``DELETE`` on the user before the role assignment.""" member_url, user = self._create_new_user_and_assign_role_on_project() # Delete the user from identity backend PROVIDERS.identity_api.driver.delete_user(user['id']) # Clean up the role assignment self.delete(member_url) # Make sure the role is gone self.head(member_url, expected_status=http.client.NOT_FOUND) def test_delete_group_before_removing_role_assignment_succeeds(self): # Disable the cache so that we perform a fresh check of the identity # backend when attempting to remove the role assignment. self.config_fixture.config(group='cache', enabled=False) # Create a new group group = unit.new_group_ref(domain_id=self.domain_id) group_ref = PROVIDERS.identity_api.create_group(group) # Assign the user a role on the project collection_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles' % { 'project_id': self.project_id, 'group_id': group_ref['id']}) member_url = ('%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id}) self.put(member_url) # Check the user has the role assigned self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) # Simulate removing the group via LDAP by directly removing it from the # identity backend. PROVIDERS.identity_api.driver.delete_group(group_ref['id']) # Ensure we can clean up the role assignment even though the group # doesn't exist self.delete(member_url) def test_delete_user_before_removing_system_assignments_succeeds(self): system_role = self._create_new_role() user = self._create_user() path = ( '/system/users/%(user_id)s/roles/%(role_id)s' % {'user_id': user['id'], 'role_id': system_role} ) self.put(path) response = self.get('/role_assignments') number_of_assignments = len(response.json_body['role_assignments']) path = '/users/%(user_id)s' % {'user_id': user['id']} self.delete(path) # The user with the system role assignment is a new user and only has # one role on the system. We should expect one less role assignment in # the list. 
response = self.get('/role_assignments') self.assertValidRoleAssignmentListResponse( response, expected_length=number_of_assignments - 1 ) def test_delete_user_and_check_role_assignment_fails(self): """Call ``DELETE`` on the user and check the role assignment.""" member_url, user = self._create_new_user_and_assign_role_on_project() # Delete the user from identity backend PROVIDERS.identity_api.delete_user(user['id']) # We should get a 404 Not Found when looking for the user in the # identity backend because we're not performing a delete operation on # the role. self.head(member_url, expected_status=http.client.NOT_FOUND) def test_token_revoked_once_group_role_grant_revoked(self): """Test token invalid when direct & indirect role on user is revoked. When a role granted to a group is revoked for a given scope, and user direct role is revoked, then tokens created by user will be invalid. """ time = datetime.datetime.utcnow() with freezegun.freeze_time(time) as frozen_datetime: # creates grant from group on project. PROVIDERS.assignment_api.create_grant( role_id=self.role['id'], project_id=self.project['id'], group_id=self.group['id'] ) # adds user to the group. PROVIDERS.identity_api.add_user_to_group( user_id=self.user['id'], group_id=self.group['id'] ) # creates a token for the user auth_body = self.build_authentication_request( user_id=self.user['id'], password=<PASSWORD>['password'], project_id=self.project['id']) token_resp = self.post('/auth/tokens', body=auth_body) token = token_resp.headers.get('x-subject-token') # validates the returned token; it should be valid. self.head('/auth/tokens', headers={'x-subject-token': token}, expected_status=http.client.OK) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) # revokes the grant from group on project. PROVIDERS.assignment_api.delete_grant( role_id=self.role['id'], project_id=self.project['id'], group_id=self.group['id']) # revokes the direct role form user on project PROVIDERS.assignment_api.delete_grant( role_id=self.role['id'], project_id=self.project['id'], user_id=self.user['id'] ) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) # validates the same token again; it should not longer be valid. self.head('/auth/tokens', token=token, expected_status=http.client.UNAUTHORIZED) def test_delete_group_before_removing_system_assignments_succeeds(self): system_role = self._create_new_role() group = self._create_group() path = ( '/system/groups/%(group_id)s/roles/%(role_id)s' % {'group_id': group['id'], 'role_id': system_role} ) self.put(path) response = self.get('/role_assignments') number_of_assignments = len(response.json_body['role_assignments']) path = '/groups/%(group_id)s' % {'group_id': group['id']} self.delete(path) # The group with the system role assignment is a new group and only has # one role on the system. We should expect one less role assignment in # the list. 
response = self.get('/role_assignments') self.assertValidRoleAssignmentListResponse( response, expected_length=number_of_assignments - 1 ) @unit.skip_if_cache_disabled('assignment') def test_delete_grant_from_user_and_project_invalidate_cache(self): # create a new project new_project = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(new_project['id'], new_project) collection_url = ( '/projects/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': new_project['id'], 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} # create the user a grant on the new project self.put(member_url) # check the grant that was just created self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) resp = self.get(collection_url) self.assertValidRoleListResponse(resp, ref=self.role, resource_url=collection_url) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles on the project resp = self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) @unit.skip_if_cache_disabled('assignment') def test_delete_grant_from_user_and_domain_invalidates_cache(self): # create a new domain new_domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain) collection_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': new_domain['id'], 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} # create the user a grant on the new domain self.put(member_url) # check the grant that was just created self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) resp = self.get(collection_url) self.assertValidRoleListResponse(resp, ref=self.role, resource_url=collection_url) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles on the domain resp = self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) @unit.skip_if_cache_disabled('assignment') def test_delete_grant_from_group_and_project_invalidates_cache(self): # create a new project new_project = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(new_project['id'], new_project) collection_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles' % { 'project_id': new_project['id'], 'group_id': self.group['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 'collection_url': collection_url, 'role_id': self.role_id} # create the group a grant on the new project self.put(member_url) # check the grant that was just created self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) resp = self.get(collection_url) self.assertValidRoleListResponse(resp, ref=self.role, resource_url=collection_url) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles on the project resp = self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) @unit.skip_if_cache_disabled('assignment') def test_delete_grant_from_group_and_domain_invalidates_cache(self): # create a new domain new_domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain) collection_url = ( '/domains/%(domain_id)s/groups/%(group_id)s/roles' % { 'domain_id': new_domain['id'], 'group_id': self.group['id']}) member_url = '%(collection_url)s/%(role_id)s' % { 
'collection_url': collection_url, 'role_id': self.role_id} # create the group a grant on the new domain self.put(member_url) # check the grant that was just created self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) resp = self.get(collection_url) self.assertValidRoleListResponse(resp, ref=self.role, resource_url=collection_url) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles on the domain resp = self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) # Role Assignments tests def test_get_head_role_assignments(self): """Call ``GET & HEAD /role_assignments``. The sample data set up already has a user, group and project that is part of self.domain. We use these plus a new user we create as our data set, making sure we ignore any role assignments that are already in existence. Since we don't yet support a first class entity for role assignments, we are only testing the LIST API. To create and delete the role assignments we use the old grant APIs. Test Plan: - Create extra user for tests - Get a list of all existing role assignments - Add a new assignment for each of the four combinations, i.e. group+domain, user+domain, group+project, user+project, using the same role each time - Get a new list of all role assignments, checking these four new ones have been added - Then delete the four we added - Get a new list of all role assignments, checking the four have been removed """ time = datetime.datetime.utcnow() with freezegun.freeze_time(time) as frozen_datetime: # Since the default fixtures already assign some roles to the # user it creates, we also need a new user that will not have any # existing assignments user1 = unit.new_user_ref(domain_id=self.domain['id']) user1 = PROVIDERS.identity_api.create_user(user1) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) collection_url = '/role_assignments' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url) self.head(collection_url, expected_status=http.client.OK) existing_assignments = len(r.result.get('role_assignments')) # Now add one of each of the four types of assignment, making sure # that we get them all back. 
gd_entity = self.build_role_assignment_entity( domain_id=self.domain_id, group_id=self.group_id, role_id=role['id']) self.put(gd_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gd_entity) self.head(collection_url, expected_status=http.client.OK) ud_entity = self.build_role_assignment_entity( domain_id=self.domain_id, user_id=user1['id'], role_id=role['id']) self.put(ud_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, ud_entity) self.head(collection_url, expected_status=http.client.OK) gp_entity = self.build_role_assignment_entity( project_id=self.project_id, group_id=self.group_id, role_id=role['id']) self.put(gp_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 3, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gp_entity) self.head(collection_url, expected_status=http.client.OK) up_entity = self.build_role_assignment_entity( project_id=self.project_id, user_id=user1['id'], role_id=role['id']) self.put(up_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 4, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, up_entity) self.head(collection_url, expected_status=http.client.OK) # Now delete the four we added and make sure they are removed # from the collection. self.delete(gd_entity['links']['assignment']) self.delete(ud_entity['links']['assignment']) self.delete(gp_entity['links']['assignment']) self.delete(up_entity['links']['assignment']) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments, resource_url=collection_url) self.assertRoleAssignmentNotInListResponse(r, gd_entity) self.assertRoleAssignmentNotInListResponse(r, ud_entity) self.assertRoleAssignmentNotInListResponse(r, gp_entity) self.assertRoleAssignmentNotInListResponse(r, up_entity) self.head(collection_url, expected_status=http.client.OK) def test_get_effective_role_assignments(self): """Call ``GET /role_assignments?effective``. Test Plan: - Create two extra user for tests - Add these users to a group - Add a role assignment for the group on a domain - Get a list of all role assignments, checking one has been added - Then get a list of all effective role assignments - the group assignment should have turned into assignments on the domain for each of the group members. 
""" user1 = unit.create_user(PROVIDERS.identity_api, domain_id=self.domain['id']) user2 = unit.create_user(PROVIDERS.identity_api, domain_id=self.domain['id']) PROVIDERS.identity_api.add_user_to_group(user1['id'], self.group['id']) PROVIDERS.identity_api.add_user_to_group(user2['id'], self.group['id']) collection_url = '/role_assignments' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, resource_url=collection_url) existing_assignments = len(r.result.get('role_assignments')) gd_entity = self.build_role_assignment_entity(domain_id=self.domain_id, group_id=self.group_id, role_id=self.role_id) self.put(gd_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gd_entity) # Now re-read the collection asking for effective roles - this # should mean the group assignment is translated into the two # member user assignments collection_url = '/role_assignments?effective' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url) ud_entity = self.build_role_assignment_entity( link=gd_entity['links']['assignment'], domain_id=self.domain_id, user_id=user1['id'], role_id=self.role_id) self.assertRoleAssignmentInListResponse(r, ud_entity) ud_entity = self.build_role_assignment_entity( link=gd_entity['links']['assignment'], domain_id=self.domain_id, user_id=user2['id'], role_id=self.role_id) self.assertRoleAssignmentInListResponse(r, ud_entity) def test_check_effective_values_for_role_assignments(self): """Call ``GET & HEAD /role_assignments?effective=value``. Check the various ways of specifying the 'effective' query parameter. If the 'effective' query parameter is included then this should always be treated as meaning 'True' unless it is specified as: {url}?effective=0 This is by design to match the agreed way of handling policy checking on query/filter parameters. Test Plan: - Create two extra user for tests - Add these users to a group - Add a role assignment for the group on a domain - Get a list of all role assignments, checking one has been added - Then issue various request with different ways of defining the 'effective' query parameter. 
As we have tested the correctness of the data coming back when we get effective roles in other tests, here we just use the count of entities to know if we are getting effective roles or not """ user1 = unit.create_user(PROVIDERS.identity_api, domain_id=self.domain['id']) user2 = unit.create_user(PROVIDERS.identity_api, domain_id=self.domain['id']) PROVIDERS.identity_api.add_user_to_group(user1['id'], self.group['id']) PROVIDERS.identity_api.add_user_to_group(user2['id'], self.group['id']) collection_url = '/role_assignments' r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse(r, resource_url=collection_url) existing_assignments = len(r.result.get('role_assignments')) gd_entity = self.build_role_assignment_entity(domain_id=self.domain_id, group_id=self.group_id, role_id=self.role_id) self.put(gd_entity['links']['assignment']) r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gd_entity) # Now re-read the collection asking for effective roles, # using the most common way of defining "effective'. This # should mean the group assignment is translated into the two # member user assignments collection_url = '/role_assignments?effective' r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url) # Now set 'effective' to false explicitly - should get # back the regular roles collection_url = '/role_assignments?effective=0' r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url) # Now try setting 'effective' to 'False' explicitly- this is # NOT supported as a way of setting a query or filter # parameter to false by design. Hence we should get back # effective roles. collection_url = '/role_assignments?effective=False' r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url) # Now set 'effective' to True explicitly collection_url = '/role_assignments?effective=True' r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url) def test_filtered_role_assignments(self): """Call ``GET /role_assignments?filters``. 
Test Plan: - Create extra users, group, role and project for tests - Make the following assignments: Give group1, role1 on project1 and domain Give user1, role2 on project1 and domain Make User1 a member of Group1 - Test a series of single filter list calls, checking that the correct results are obtained - Test a multi-filtered list call - Test listing all effective roles for a given user - Test the equivalent of the list of roles in a project scoped token (all effective roles for a user on a project) """ # Since the default fixtures already assign some roles to the # user it creates, we also need a new user that will not have any # existing assignments user1 = unit.create_user(PROVIDERS.identity_api, domain_id=self.domain['id']) user2 = unit.create_user(PROVIDERS.identity_api, domain_id=self.domain['id']) group1 = unit.new_group_ref(domain_id=self.domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) PROVIDERS.identity_api.add_user_to_group(user1['id'], group1['id']) PROVIDERS.identity_api.add_user_to_group(user2['id'], group1['id']) project1 = unit.new_project_ref(domain_id=self.domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) self.role1 = unit.new_role_ref() PROVIDERS.role_api.create_role(self.role1['id'], self.role1) self.role2 = unit.new_role_ref() PROVIDERS.role_api.create_role(self.role2['id'], self.role2) # Now add one of each of the six types of assignment gd_entity = self.build_role_assignment_entity( domain_id=self.domain_id, group_id=group1['id'], role_id=self.role1['id']) self.put(gd_entity['links']['assignment']) ud_entity = self.build_role_assignment_entity(domain_id=self.domain_id, user_id=user1['id'], role_id=self.role2['id']) self.put(ud_entity['links']['assignment']) gp_entity = self.build_role_assignment_entity( project_id=project1['id'], group_id=group1['id'], role_id=self.role1['id']) self.put(gp_entity['links']['assignment']) up_entity = self.build_role_assignment_entity( project_id=project1['id'], user_id=user1['id'], role_id=self.role2['id']) self.put(up_entity['links']['assignment']) gs_entity = self.build_role_assignment_entity( system='all', group_id=group1['id'], role_id=self.role1['id']) self.put(gs_entity['links']['assignment']) us_entity = self.build_role_assignment_entity( system='all', user_id=user1['id'], role_id=self.role2['id']) self.put(us_entity['links']['assignment']) us2_entity = self.build_role_assignment_entity( system='all', user_id=user2['id'], role_id=self.role2['id']) self.put(us2_entity['links']['assignment']) # Now list by various filters to make sure we get back the right ones collection_url = ('/role_assignments?scope.project.id=%s' % project1['id']) r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse(r, expected_length=2, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, gp_entity) collection_url = ('/role_assignments?scope.domain.id=%s' % self.domain['id']) r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse(r, expected_length=2, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, ud_entity) self.assertRoleAssignmentInListResponse(r, gd_entity) collection_url = '/role_assignments?user.id=%s' % user1['id'] r = self.get(collection_url, expected_status=http.client.OK) 
self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse(r, expected_length=3, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, ud_entity) collection_url = '/role_assignments?group.id=%s' % group1['id'] r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse(r, expected_length=3, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gd_entity) self.assertRoleAssignmentInListResponse(r, gp_entity) collection_url = '/role_assignments?role.id=%s' % self.role1['id'] r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse(r, expected_length=3, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, gd_entity) self.assertRoleAssignmentInListResponse(r, gp_entity) self.assertRoleAssignmentInListResponse(r, gs_entity) collection_url = '/role_assignments?role.id=%s' % self.role2['id'] r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse(r, expected_length=4, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, ud_entity) self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, us_entity) # Let's try combining two filers together.... collection_url = ( '/role_assignments?user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % { 'user_id': user1['id'], 'project_id': project1['id']}) r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse(r, expected_length=1, resource_url=collection_url) self.assertRoleAssignmentInListResponse(r, up_entity) # Now for a harder one - filter for user with effective # roles - this should return role assignment that were directly # assigned as well as by virtue of group membership collection_url = ('/role_assignments?effective&user.id=%s' % user1['id']) r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse(r, expected_length=4, resource_url=collection_url) # Should have the two direct roles... self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, ud_entity) # ...and the two via group membership... gp1_link = self.build_role_assignment_link( project_id=project1['id'], group_id=group1['id'], role_id=self.role1['id']) gd1_link = self.build_role_assignment_link(domain_id=self.domain_id, group_id=group1['id'], role_id=self.role1['id']) up1_entity = self.build_role_assignment_entity( link=gp1_link, project_id=project1['id'], user_id=user1['id'], role_id=self.role1['id']) ud1_entity = self.build_role_assignment_entity( link=gd1_link, domain_id=self.domain_id, user_id=user1['id'], role_id=self.role1['id']) self.assertRoleAssignmentInListResponse(r, up1_entity) self.assertRoleAssignmentInListResponse(r, ud1_entity) # ...and for the grand-daddy of them all, simulate the request # that would generate the list of effective roles in a project # scoped token. 
collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % { 'user_id': user1['id'], 'project_id': project1['id']}) r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse(r, expected_length=2, resource_url=collection_url) # Should have one direct role and one from group membership... self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, up1_entity) def test_list_system_role_assignments(self): # create a bunch of roles user_system_role_id = self._create_new_role() user_domain_role_id = self._create_new_role() user_project_role_id = self._create_new_role() group_system_role_id = self._create_new_role() group_domain_role_id = self._create_new_role() group_project_role_id = self._create_new_role() # create a user and grant the user a role on the system, domain, and # project user = self._create_user() url = '/system/users/%s/roles/%s' % (user['id'], user_system_role_id) self.put(url) url = '/domains/%s/users/%s/roles/%s' % ( self.domain_id, user['id'], user_domain_role_id ) self.put(url) url = '/projects/%s/users/%s/roles/%s' % ( self.project_id, user['id'], user_project_role_id ) self.put(url) # create a group and grant the group a role on the system, domain, and # project group = self._create_group() url = '/system/groups/%s/roles/%s' % ( group['id'], group_system_role_id ) self.put(url) url = '/domains/%s/groups/%s/roles/%s' % ( self.domain_id, group['id'], group_domain_role_id ) self.put(url) url = '/projects/%s/groups/%s/roles/%s' % ( self.project_id, group['id'], group_project_role_id ) self.put(url) # /v3/role_assignments?scope.system=all should return two assignments response = self.get('/role_assignments?scope.system=all') self.assertValidRoleAssignmentListResponse(response, expected_length=2) for assignment in response.json_body['role_assignments']: self.assertTrue(assignment['scope']['system']['all']) if assignment.get('user'): self.assertEqual(user_system_role_id, assignment['role']['id']) if assignment.get('group'): self.assertEqual( group_system_role_id, assignment['role']['id'] ) # /v3/role_assignments?scope_system=all&user.id=$USER_ID should return # one role assignment url = '/role_assignments?scope.system=all&user.id=%s' % user['id'] response = self.get(url) self.assertValidRoleAssignmentListResponse(response, expected_length=1) self.assertEqual( user_system_role_id, response.json_body['role_assignments'][0]['role']['id'] ) # /v3/role_assignments?scope_system=all&group.id=$GROUP_ID should # return one role assignment url = '/role_assignments?scope.system=all&group.id=%s' % group['id'] response = self.get(url) self.assertValidRoleAssignmentListResponse(response, expected_length=1) self.assertEqual( group_system_role_id, response.json_body['role_assignments'][0]['role']['id'] ) # /v3/role_assignments?user.id=$USER_ID should return 3 assignments # and system should be in that list of assignments url = '/role_assignments?user.id=%s' % user['id'] response = self.get(url) self.assertValidRoleAssignmentListResponse(response, expected_length=3) for assignment in response.json_body['role_assignments']: if 'system' in assignment['scope']: self.assertEqual( user_system_role_id, assignment['role']['id'] ) if 'domain' in assignment['scope']: self.assertEqual( user_domain_role_id, assignment['role']['id'] ) if 'project' in assignment['scope']: self.assertEqual( user_project_role_id, 
assignment['role']['id'] ) # /v3/role_assignments?group.id=$GROUP_ID should return 3 assignments # and system should be in that list of assignments url = '/role_assignments?group.id=%s' % group['id'] response = self.get(url) self.assertValidRoleAssignmentListResponse(response, expected_length=3) for assignment in response.json_body['role_assignments']: if 'system' in assignment['scope']: self.assertEqual( group_system_role_id, assignment['role']['id'] ) if 'domain' in assignment['scope']: self.assertEqual( group_domain_role_id, assignment['role']['id'] ) if 'project' in assignment['scope']: self.assertEqual( group_project_role_id, assignment['role']['id'] ) class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin): """Base class for testing /v3/role_assignments API behavior.""" MAX_HIERARCHY_BREADTH = 3 MAX_HIERARCHY_DEPTH = CONF.max_project_tree_depth - 1 def load_sample_data(self): """Create sample data to be used on tests. Created data are i) a role and ii) a domain containing: a project hierarchy and 3 users within 3 groups. """ def create_project_hierarchy(parent_id, depth): """Create a random project hierarchy.""" if depth == 0: return breadth = random.randint(1, self.MAX_HIERARCHY_BREADTH) subprojects = [] for i in range(breadth): subprojects.append(unit.new_project_ref( domain_id=self.domain_id, parent_id=parent_id)) PROVIDERS.resource_api.create_project( subprojects[-1]['id'], subprojects[-1] ) new_parent = subprojects[random.randint(0, breadth - 1)] create_project_hierarchy(new_parent['id'], depth - 1) super(RoleAssignmentBaseTestCase, self).load_sample_data() # Create a domain self.domain = unit.new_domain_ref() self.domain_id = self.domain['id'] PROVIDERS.resource_api.create_domain(self.domain_id, self.domain) # Create a project hierarchy self.project = unit.new_project_ref(domain_id=self.domain_id) self.project_id = self.project['id'] PROVIDERS.resource_api.create_project(self.project_id, self.project) # Create a random project hierarchy create_project_hierarchy(self.project_id, random.randint(1, self.MAX_HIERARCHY_DEPTH)) # Create 3 users self.user_ids = [] for i in range(3): user = unit.new_user_ref(domain_id=self.domain_id) user = PROVIDERS.identity_api.create_user(user) self.user_ids.append(user['id']) # Create 3 groups self.group_ids = [] for i in range(3): group = unit.new_group_ref(domain_id=self.domain_id) group = PROVIDERS.identity_api.create_group(group) self.group_ids.append(group['id']) # Put 2 members on each group PROVIDERS.identity_api.add_user_to_group( user_id=self.user_ids[i], group_id=group['id'] ) PROVIDERS.identity_api.add_user_to_group( user_id=self.user_ids[i % 2], group_id=group['id'] ) PROVIDERS.assignment_api.create_grant( user_id=self.user_id, project_id=self.project_id, role_id=self.role_id ) # Create a role self.role = unit.new_role_ref() self.role_id = self.role['id'] PROVIDERS.role_api.create_role(self.role_id, self.role) # Set default user and group to be used on tests self.default_user_id = self.user_ids[0] self.default_group_id = self.group_ids[0] def get_role_assignments(self, expected_status=http.client.OK, **filters): """Return the result from querying role assignment API + queried URL. Calls GET /v3/role_assignments?<params> and returns its result, where <params> is the HTTP query parameters form of effective option plus filters, if provided. Queried URL is returned as well. :returns: a tuple containing the list role assignments API response and queried URL. 
""" query_url = self._get_role_assignments_query_url(**filters) response = self.get(query_url, expected_status=expected_status) return (response, query_url) def _get_role_assignments_query_url(self, **filters): """Return non-effective role assignments query URL from given filters. :param filters: query parameters are created with the provided filters on role assignments attributes. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: role assignments query URL. """ return self.build_role_assignment_query_url(**filters) class RoleAssignmentFailureTestCase(RoleAssignmentBaseTestCase): """Class for testing invalid query params on /v3/role_assignments API. Querying domain and project, or user and group results in a HTTP 400 Bad Request, since a role assignment must contain only a single pair of (actor, target). In addition, since filtering on role assignments applies only to the final result, effective mode cannot be combined with i) group or ii) domain and inherited, because it would always result in an empty list. """ def test_get_role_assignments_by_domain_and_project(self): self.get_role_assignments(domain_id=self.domain_id, project_id=self.project_id, expected_status=http.client.BAD_REQUEST) def test_get_role_assignments_by_user_and_group(self): self.get_role_assignments(user_id=self.default_user_id, group_id=self.default_group_id, expected_status=http.client.BAD_REQUEST) def test_get_role_assignments_by_effective_and_inherited(self): self.get_role_assignments(domain_id=self.domain_id, effective=True, inherited_to_projects=True, expected_status=http.client.BAD_REQUEST) def test_get_role_assignments_by_effective_and_group(self): self.get_role_assignments(effective=True, group_id=self.default_group_id, expected_status=http.client.BAD_REQUEST) class RoleAssignmentDirectTestCase(RoleAssignmentBaseTestCase): """Class for testing direct assignments on /v3/role_assignments API. Direct assignments on a domain or project have effect on them directly, instead of on their project hierarchy, i.e they are non-inherited. In addition, group direct assignments are not expanded to group's users. Tests on this class make assertions on the representation and API filtering of direct assignments. """ def _test_get_role_assignments(self, **filters): """Generic filtering test method. According to the provided filters, this method: - creates a new role assignment; - asserts that list role assignments API reponds correctly; - deletes the created role assignment. :param filters: filters to be considered when listing role assignments. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. 
""" # Fills default assignment with provided filters test_assignment = self._set_default_assignment_attributes(**filters) # Create new role assignment for this test PROVIDERS.assignment_api.create_grant(**test_assignment) # Get expected role assignments expected_assignments = self._list_expected_role_assignments( **test_assignment) # Get role assignments from API response, query_url = self.get_role_assignments(**test_assignment) self.assertValidRoleAssignmentListResponse(response, resource_url=query_url) self.assertEqual(len(expected_assignments), len(response.result.get('role_assignments'))) # Assert that expected role assignments were returned by the API call for assignment in expected_assignments: self.assertRoleAssignmentInListResponse(response, assignment) # Delete created role assignment PROVIDERS.assignment_api.delete_grant(**test_assignment) def _set_default_assignment_attributes(self, **attribs): """Insert default values for missing attributes of role assignment. If no actor, target or role are provided, they will default to values from sample data. :param attribs: info from a role assignment entity. Valid attributes are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. """ if not any(target in attribs for target in ('domain_id', 'projects_id')): attribs['project_id'] = self.project_id if not any(actor in attribs for actor in ('user_id', 'group_id')): attribs['user_id'] = self.default_user_id if 'role_id' not in attribs: attribs['role_id'] = self.role_id return attribs def _list_expected_role_assignments(self, **filters): """Given the filters, it returns expected direct role assignments. :param filters: filters that will be considered when listing role assignments. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: the list of the expected role assignments. """ return [self.build_role_assignment_entity(**filters)] # Test cases below call the generic test method, providing different filter # combinations. Filters are provided as specified in the method name, after # 'by'. For example, test_get_role_assignments_by_project_user_and_role # calls the generic test method with project_id, user_id and role_id. 
def test_get_role_assignments_by_domain(self, **filters): self._test_get_role_assignments(domain_id=self.domain_id, **filters) def test_get_role_assignments_by_project(self, **filters): self._test_get_role_assignments(project_id=self.project_id, **filters) def test_get_role_assignments_by_user(self, **filters): self._test_get_role_assignments(user_id=self.default_user_id, **filters) def test_get_role_assignments_by_group(self, **filters): self._test_get_role_assignments(group_id=self.default_group_id, **filters) def test_get_role_assignments_by_role(self, **filters): self._test_get_role_assignments(role_id=self.role_id, **filters) def test_get_role_assignments_by_domain_and_user(self, **filters): self.test_get_role_assignments_by_domain(user_id=self.default_user_id, **filters) def test_get_role_assignments_by_domain_and_group(self, **filters): self.test_get_role_assignments_by_domain( group_id=self.default_group_id, **filters) def test_get_role_assignments_by_project_and_user(self, **filters): self.test_get_role_assignments_by_project(user_id=self.default_user_id, **filters) def test_get_role_assignments_by_project_and_group(self, **filters): self.test_get_role_assignments_by_project( group_id=self.default_group_id, **filters) def test_get_role_assignments_by_domain_user_and_role(self, **filters): self.test_get_role_assignments_by_domain_and_user(role_id=self.role_id, **filters) def test_get_role_assignments_by_domain_group_and_role(self, **filters): self.test_get_role_assignments_by_domain_and_group( role_id=self.role_id, **filters) def test_get_role_assignments_by_project_user_and_role(self, **filters): self.test_get_role_assignments_by_project_and_user( role_id=self.role_id, **filters) def test_get_role_assignments_by_project_group_and_role(self, **filters): self.test_get_role_assignments_by_project_and_group( role_id=self.role_id, **filters) class RoleAssignmentInheritedTestCase(RoleAssignmentDirectTestCase): """Class for testing inherited assignments on /v3/role_assignments API. Inherited assignments on a domain or project have no effect on them directly, but on the projects under them instead. Tests on this class do not make assertions on the effect of inherited assignments, but in their representation and API filtering. """ def _test_get_role_assignments(self, **filters): """Add inherited_to_project filter to expected entity in tests.""" super(RoleAssignmentInheritedTestCase, self)._test_get_role_assignments(inherited_to_projects=True, **filters) class RoleAssignmentEffectiveTestCase(RoleAssignmentInheritedTestCase): """Class for testing inheritance effects on /v3/role_assignments API. Inherited assignments on a domain or project have no effect on them directly, but on the projects under them instead. Tests on this class make assertions on the effect of inherited assignments and API filtering. """ def _get_role_assignments_query_url(self, **filters): """Return effective role assignments query URL from given filters. For test methods in this class, effetive will always be true. As in effective mode, inherited_to_projects, group_id, domain_id and project_id will always be desconsidered from provided filters. :param filters: query parameters are created with the provided filters. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: role assignments query URL. 
""" query_filters = filters.copy() query_filters.pop('inherited_to_projects') query_filters.pop('group_id', None) query_filters.pop('domain_id', None) query_filters.pop('project_id', None) return self.build_role_assignment_query_url(effective=True, **query_filters) def _list_expected_role_assignments(self, **filters): """Given the filters, it returns expected direct role assignments. :param filters: filters that will be considered when listing role assignments. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: the list of the expected role assignments. """ # Get assignment link, to be put on 'links': {'assignment': link} assignment_link = self.build_role_assignment_link(**filters) # Expand group membership user_ids = [None] if filters.get('group_id'): user_ids = [user['id'] for user in PROVIDERS.identity_api.list_users_in_group( filters['group_id'])] else: user_ids = [self.default_user_id] # Expand role inheritance project_ids = [None] if filters.get('domain_id'): project_ids = [project['id'] for project in PROVIDERS.resource_api.list_projects_in_domain( filters.pop('domain_id'))] else: project_ids = [project['id'] for project in PROVIDERS.resource_api.list_projects_in_subtree( self.project_id)] # Compute expected role assignments assignments = [] for project_id in project_ids: filters['project_id'] = project_id for user_id in user_ids: filters['user_id'] = user_id assignments.append(self.build_role_assignment_entity( link=assignment_link, **filters)) return assignments class AssignmentInheritanceTestCase(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin): """Test inheritance crud and its effects.""" def test_get_token_from_inherited_user_domain_role_grants(self): # Create a new user to ensure that no grant is loaded from sample data user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) # Define domain and project authentication data domain_auth_data = self.build_authentication_request( user_id=user['id'], password=<PASSWORD>['password'], domain_id=self.domain_id) project_auth_data = self.build_authentication_request( user_id=user['id'], password=<PASSWORD>['password'], project_id=self.project_id) # Check the user cannot get a domain nor a project token self.v3_create_token(domain_auth_data, expected_status=http.client.UNAUTHORIZED) self.v3_create_token(project_auth_data, expected_status=http.client.UNAUTHORIZED) # Grant non-inherited role for user on domain non_inher_ud_link = self.build_role_assignment_link( domain_id=self.domain_id, user_id=user['id'], role_id=self.role_id) self.put(non_inher_ud_link) # Check the user can get only a domain token self.v3_create_token(domain_auth_data) self.v3_create_token(project_auth_data, expected_status=http.client.UNAUTHORIZED) # Create inherited role inherited_role = unit.new_role_ref(name='inherited') PROVIDERS.role_api.create_role(inherited_role['id'], inherited_role) # Grant inherited role for user on domain inher_ud_link = self.build_role_assignment_link( domain_id=self.domain_id, user_id=user['id'], role_id=inherited_role['id'], inherited_to_projects=True) self.put(inher_ud_link) # Check the user can get both a domain and a project token self.v3_create_token(domain_auth_data) self.v3_create_token(project_auth_data) # Delete inherited grant self.delete(inher_ud_link) # Check the user can only get a domain token self.v3_create_token(domain_auth_data) self.v3_create_token(project_auth_data, expected_status=http.client.UNAUTHORIZED) # Delete non-inherited grant 
        self.delete(non_inher_ud_link)

        # Check the user cannot get a domain token anymore
        self.v3_create_token(domain_auth_data,
                             expected_status=http.client.UNAUTHORIZED)

    def test_get_token_from_inherited_group_domain_role_grants(self):
        # Create a new group and put a new user in it to
        # ensure that no grant is loaded from sample data
        user = unit.create_user(
            PROVIDERS.identity_api, domain_id=self.domain_id
        )

        group = unit.new_group_ref(domain_id=self.domain['id'])
        group = PROVIDERS.identity_api.create_group(group)
        PROVIDERS.identity_api.add_user_to_group(user['id'], group['id'])

        # Define domain and project authentication data
        domain_auth_data = self.build_authentication_request(
            user_id=user['id'],
            password=user['password'],
            domain_id=self.domain_id)
        project_auth_data = self.build_authentication_request(
            user_id=user['id'],
            password=user['password'],
            project_id=self.project_id)

        # Check the user cannot get a domain nor a project token
        self.v3_create_token(domain_auth_data,
                             expected_status=http.client.UNAUTHORIZED)
        self.v3_create_token(project_auth_data,
                             expected_status=http.client.UNAUTHORIZED)

        # Grant non-inherited role for user on domain
        non_inher_gd_link = self.build_role_assignment_link(
            domain_id=self.domain_id, user_id=user['id'],
            role_id=self.role_id)
        self.put(non_inher_gd_link)

        # Check the user can get only a domain token
        self.v3_create_token(domain_auth_data)
        self.v3_create_token(project_auth_data,
                             expected_status=http.client.UNAUTHORIZED)

        # Create inherited role
        inherited_role = unit.new_role_ref(name='inherited')
        PROVIDERS.role_api.create_role(inherited_role['id'], inherited_role)

        # Grant inherited role for user on domain
        inher_gd_link = self.build_role_assignment_link(
            domain_id=self.domain_id, user_id=user['id'],
            role_id=inherited_role['id'], inherited_to_projects=True)
        self.put(inher_gd_link)

        # Check the user can get both a domain and a project token
        self.v3_create_token(domain_auth_data)
        self.v3_create_token(project_auth_data)

        # Delete inherited grant
        self.delete(inher_gd_link)

        # Check the user can only get a domain token
        self.v3_create_token(domain_auth_data)
        self.v3_create_token(project_auth_data,
                             expected_status=http.client.UNAUTHORIZED)

        # Delete non-inherited grant
        self.delete(non_inher_gd_link)

        # Check the user cannot get a domain token anymore
        self.v3_create_token(domain_auth_data,
                             expected_status=http.client.UNAUTHORIZED)

    def _test_crud_inherited_and_direct_assignment_on_target(self,
                                                             target_url):
        time = datetime.datetime.utcnow()
        with freezegun.freeze_time(time) as frozen_datetime:
            # Create a new role to avoid assignments loaded from sample data
            role = unit.new_role_ref()
            PROVIDERS.role_api.create_role(role['id'], role)

            # Define URLs
            direct_url = '%s/users/%s/roles/%s' % (
                target_url, self.user_id, role['id'])
            inherited_url = ('/OS-INHERIT/%s/inherited_to_projects' %
                             direct_url.lstrip('/'))

            # Create the direct assignment
            self.put(direct_url)

            # Check the direct assignment exists, but the inherited one does
            # not
            self.head(direct_url)
            self.head(inherited_url, expected_status=http.client.NOT_FOUND)

            # Now add the inherited assignment
            self.put(inherited_url)

            # Check both the direct and inherited assignment exist
            self.head(direct_url)
            self.head(inherited_url)

            # Delete the inherited assignment
            self.delete(inherited_url)
            frozen_datetime.tick(delta=datetime.timedelta(seconds=1))

            # Check the direct assignment exists, but the inherited one does
            # not
            self.head(direct_url)
            self.head(inherited_url, expected_status=http.client.NOT_FOUND)

            # Now delete the direct assignment
self.delete(direct_url) # Check that none of them exist self.head(direct_url, expected_status=http.client.NOT_FOUND) self.head(inherited_url, expected_status=http.client.NOT_FOUND) def test_crud_inherited_and_direct_assignment_on_domains(self): self._test_crud_inherited_and_direct_assignment_on_target( '/domains/%s' % self.domain_id) def test_crud_inherited_and_direct_assignment_on_projects(self): self._test_crud_inherited_and_direct_assignment_on_target( '/projects/%s' % self.project_id) def test_crud_user_inherited_domain_role_grants(self): role_list = [] for _ in range(2): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) # Create a non-inherited role as a spoiler PROVIDERS.assignment_api.create_grant( role_list[1]['id'], user_id=self.user['id'], domain_id=self.domain_id) base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': self.domain_id, 'user_id': self.user['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[0]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) # Check we can read it back self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[0], resource_url=collection_url) # Now delete and check its gone self.delete(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, expected_length=0, resource_url=collection_url) def test_list_role_assignments_for_inherited_domain_grants(self): """Call ``GET /role_assignments with inherited domain grants``. Test Plan: - Create 4 roles - Create a domain with a user and two projects - Assign two direct roles to project1 - Assign a spoiler role to project2 - Issue the URL to add inherited role to the domain - Issue the URL to check it is indeed on the domain - Issue the URL to check effective roles on project1 - this should return 3 roles. 
""" role_list = [] for _ in range(4): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) # Add some roles to the project PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id']) PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[1]['id']) # ..and one on a different project as a spoiler PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[2]['id']) # Now create our inherited role on the domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': domain['id'], 'user_id': user1['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[3]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[3], resource_url=collection_url) # Now use the list domain role assignments api to check if this # is included collection_url = ( '/role_assignments?user.id=%(user_id)s' '&scope.domain.id=%(domain_id)s' % { 'user_id': user1['id'], 'domain_id': domain['id']}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=1, resource_url=collection_url) ud_entity = self.build_role_assignment_entity( domain_id=domain['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, ud_entity) # Now ask for effective list role assignments - the role should # turn into a project role, along with the two direct roles that are # on the project collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % { 'user_id': user1['id'], 'project_id': project1['id']}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=3, resource_url=collection_url) # An effective role for an inherited role will be a project # entity, with a domain link to the inherited assignment ud_url = self.build_role_assignment_link( domain_id=domain['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) up_entity = self.build_role_assignment_entity( link=ud_url, project_id=project1['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, up_entity) def _test_list_role_assignments_include_names(self, role1): """Call ``GET /role_assignments with include names``. 
Test Plan: - Create a domain with a group and a user - Create a project with a group and a user """ role1 = unit.new_role_ref() PROVIDERS.role_api.create_role(role1['id'], role1) user1 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) group = unit.new_group_ref(domain_id=self.domain_id) group = PROVIDERS.identity_api.create_group(group) project1 = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(project1['id'], project1) expected_entity1 = self.build_role_assignment_entity_include_names( role_ref=role1, project_ref=project1, user_ref=user1) self.put(expected_entity1['links']['assignment']) expected_entity2 = self.build_role_assignment_entity_include_names( role_ref=role1, domain_ref=self.domain, group_ref=group) self.put(expected_entity2['links']['assignment']) expected_entity3 = self.build_role_assignment_entity_include_names( role_ref=role1, domain_ref=self.domain, user_ref=user1) self.put(expected_entity3['links']['assignment']) expected_entity4 = self.build_role_assignment_entity_include_names( role_ref=role1, project_ref=project1, group_ref=group) self.put(expected_entity4['links']['assignment']) collection_url_domain = ( '/role_assignments?include_names&scope.domain.id=%(domain_id)s' % { 'domain_id': self.domain_id}) rs_domain = self.get(collection_url_domain) collection_url_project = ( '/role_assignments?include_names&' 'scope.project.id=%(project_id)s' % { 'project_id': project1['id']}) rs_project = self.get(collection_url_project) collection_url_group = ( '/role_assignments?include_names&group.id=%(group_id)s' % { 'group_id': group['id']}) rs_group = self.get(collection_url_group) collection_url_user = ( '/role_assignments?include_names&user.id=%(user_id)s' % { 'user_id': user1['id']}) rs_user = self.get(collection_url_user) collection_url_role = ( '/role_assignments?include_names&role.id=%(role_id)s' % { 'role_id': role1['id']}) rs_role = self.get(collection_url_role) # Make sure all entities were created successfully self.assertEqual(http.client.OK, rs_domain.status_int) self.assertEqual(http.client.OK, rs_project.status_int) self.assertEqual(http.client.OK, rs_group.status_int) self.assertEqual(http.client.OK, rs_user.status_int) # Make sure we can get back the correct number of entities self.assertValidRoleAssignmentListResponse( rs_domain, expected_length=2, resource_url=collection_url_domain) self.assertValidRoleAssignmentListResponse( rs_project, expected_length=2, resource_url=collection_url_project) self.assertValidRoleAssignmentListResponse( rs_group, expected_length=2, resource_url=collection_url_group) self.assertValidRoleAssignmentListResponse( rs_user, expected_length=2, resource_url=collection_url_user) self.assertValidRoleAssignmentListResponse( rs_role, expected_length=4, resource_url=collection_url_role) # Verify all types of entities have the correct format self.assertRoleAssignmentInListResponse(rs_domain, expected_entity2) self.assertRoleAssignmentInListResponse(rs_project, expected_entity1) self.assertRoleAssignmentInListResponse(rs_group, expected_entity4) self.assertRoleAssignmentInListResponse(rs_user, expected_entity3) self.assertRoleAssignmentInListResponse(rs_role, expected_entity1) def test_list_role_assignments_include_names_global_role(self): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) self._test_list_role_assignments_include_names(role) def test_list_role_assignments_include_names_domain_role(self): role = unit.new_role_ref(domain_id=self.domain['id']) 
PROVIDERS.role_api.create_role(role['id'], role) self._test_list_role_assignments_include_names(role) def test_remove_assignment_for_project_acting_as_domain(self): """Test goal: remove assignment for project acting as domain. Ensure when we have two role assignments for the project acting as domain, one dealing with it as a domain and other as a project, we still able to remove those assignments later. Test plan: - Create a role and a domain with a user; - Grant a role for this user in this domain; - Grant a role for this user in the same entity as a project; - Ensure that both assignments were created and it was valid; - Remove the domain assignment for the user and show that the project assignment for him still valid """ role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.create_user(PROVIDERS.identity_api, domain_id=domain['id']) assignment_domain = self.build_role_assignment_entity( role_id=role['id'], domain_id=domain['id'], user_id=user['id'], inherited_to_projects=False) assignment_project = self.build_role_assignment_entity( role_id=role['id'], project_id=domain['id'], user_id=user['id'], inherited_to_projects=False) self.put(assignment_domain['links']['assignment']) self.put(assignment_project['links']['assignment']) collection_url = '/role_assignments?user.id=%(user_id)s' % ( {'user_id': user['id']}) result = self.get(collection_url) # We have two role assignments based in both roles for the domain and # project scope self.assertValidRoleAssignmentListResponse( result, expected_length=2, resource_url=collection_url) self.assertRoleAssignmentInListResponse(result, assignment_domain) domain_url = '/domains/%s/users/%s/roles/%s' % ( domain['id'], user['id'], role['id']) self.delete(domain_url) collection_url = '/role_assignments?user.id=%(user_id)s' % ( {'user_id': user['id']}) result = self.get(collection_url) # Now we only have one assignment for the project scope since the # domain scope was removed. self.assertValidRoleAssignmentListResponse( result, expected_length=1, resource_url=collection_url) self.assertRoleAssignmentInListResponse(result, assignment_project) def test_list_inherited_role_assignments_include_names(self): """Call ``GET /role_assignments?include_names``. Test goal: ensure calling list role assignments including names honors the inherited role assignments flag. Test plan: - Create a role and a domain with a user; - Create a inherited role assignment; - List role assignments for that user; - List role assignments for that user including names. 
""" role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.create_user(PROVIDERS.identity_api, domain_id=domain['id']) # Create and store expected assignment refs assignment = self.build_role_assignment_entity( role_id=role['id'], domain_id=domain['id'], user_id=user['id'], inherited_to_projects=True) assignment_names = self.build_role_assignment_entity_include_names( role_ref=role, domain_ref=domain, user_ref=user, inherited_assignment=True) # Ensure expected assignment refs are inherited and have the same URL self.assertEqual('projects', assignment['scope']['OS-INHERIT:inherited_to']) self.assertEqual('projects', assignment_names['scope']['OS-INHERIT:inherited_to']) self.assertEqual(assignment['links']['assignment'], assignment_names['links']['assignment']) self.put(assignment['links']['assignment']) collection_url = '/role_assignments?user.id=%(user_id)s' % ( {'user_id': user['id']}) result = self.get(collection_url) self.assertValidRoleAssignmentListResponse( result, expected_length=1, resource_url=collection_url) self.assertRoleAssignmentInListResponse(result, assignment) collection_url = ('/role_assignments?include_names&' 'user.id=%(user_id)s' % {'user_id': user['id']}) result = self.get(collection_url) self.assertValidRoleAssignmentListResponse( result, expected_length=1, resource_url=collection_url) self.assertRoleAssignmentInListResponse(result, assignment_names) def test_list_role_assignments_for_disabled_inheritance_extension(self): """Call ``GET /role_assignments with inherited domain grants``. Test Plan: - Issue the URL to add inherited role to the domain - Issue the URL to check effective roles on project include the inherited role - Disable the extension - Re-check the effective roles, proving the inherited role no longer shows up. 
""" role_list = [] for _ in range(4): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) # Add some roles to the project PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id']) PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[1]['id']) # ..and one on a different project as a spoiler PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[2]['id']) # Now create our inherited role on the domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': domain['id'], 'user_id': user1['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[3]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[3], resource_url=collection_url) # Get effective list role assignments - the role should # turn into a project role, along with the two direct roles that are # on the project collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % { 'user_id': user1['id'], 'project_id': project1['id']}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=3, resource_url=collection_url) ud_url = self.build_role_assignment_link( domain_id=domain['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) up_entity = self.build_role_assignment_entity( link=ud_url, project_id=project1['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, up_entity) def test_list_role_assignments_for_inherited_group_domain_grants(self): """Call ``GET /role_assignments with inherited group domain grants``. Test Plan: - Create 4 roles - Create a domain with a user and two projects - Assign two direct roles to project1 - Assign a spoiler role to project2 - Issue the URL to add inherited role to the domain - Issue the URL to check it is indeed on the domain - Issue the URL to check effective roles on project1 - this should return 3 roles. 
""" role_list = [] for _ in range(4): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) user2 = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) PROVIDERS.identity_api.add_user_to_group( user1['id'], group1['id'] ) PROVIDERS.identity_api.add_user_to_group( user2['id'], group1['id'] ) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) # Add some roles to the project PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id']) PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[1]['id']) # ..and one on a different project as a spoiler PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[2]['id']) # Now create our inherited role on the domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % { 'domain_id': domain['id'], 'group_id': group1['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[3]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[3], resource_url=collection_url) # Now use the list domain role assignments api to check if this # is included collection_url = ( '/role_assignments?group.id=%(group_id)s' '&scope.domain.id=%(domain_id)s' % { 'group_id': group1['id'], 'domain_id': domain['id']}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=1, resource_url=collection_url) gd_entity = self.build_role_assignment_entity( domain_id=domain['id'], group_id=group1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, gd_entity) # Now ask for effective list role assignments - the role should # turn into a user project role, along with the two direct roles # that are on the project collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % { 'user_id': user1['id'], 'project_id': project1['id']}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=3, resource_url=collection_url) # An effective role for an inherited role will be a project # entity, with a domain link to the inherited assignment up_entity = self.build_role_assignment_entity( link=gd_entity['links']['assignment'], project_id=project1['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, up_entity) def test_filtered_role_assignments_for_inherited_grants(self): """Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``. 
Test Plan: - Create 5 roles - Create a domain with a user, group and two projects - Assign three direct spoiler roles to projects - Issue the URL to add an inherited user role to the domain - Issue the URL to add an inherited group role to the domain - Issue the URL to filter by inherited roles - this should return just the 2 inherited roles. """ role_list = [] for _ in range(5): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) # Add some spoiler roles to the projects PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id']) PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[1]['id']) # Create a non-inherited role as a spoiler PROVIDERS.assignment_api.create_grant( role_list[2]['id'], user_id=user1['id'], domain_id=domain['id']) # Now create two inherited roles on the domain, one for a user # and one for a domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': domain['id'], 'user_id': user1['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[3]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[3], resource_url=collection_url) base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % { 'domain_id': domain['id'], 'group_id': group1['id']}) member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % { 'collection_url': base_collection_url, 'role_id': role_list[4]['id']} collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=role_list[4], resource_url=collection_url) # Now use the list role assignments api to get a list of inherited # roles on the domain - should get back the two roles collection_url = ( '/role_assignments?scope.OS-INHERIT:inherited_to=projects') r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, expected_length=2, resource_url=collection_url) ud_entity = self.build_role_assignment_entity( domain_id=domain['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True) gd_entity = self.build_role_assignment_entity( domain_id=domain['id'], group_id=group1['id'], role_id=role_list[4]['id'], inherited_to_projects=True) self.assertRoleAssignmentInListResponse(r, ud_entity) self.assertRoleAssignmentInListResponse(r, gd_entity) def _setup_hierarchical_projects_scenario(self): """Create basic hierarchical projects scenario. This basic scenario contains a root with one leaf project and two roles with the following names: non-inherited and inherited. 
""" # Create project hierarchy root = unit.new_project_ref(domain_id=self.domain['id']) leaf = unit.new_project_ref(domain_id=self.domain['id'], parent_id=root['id']) PROVIDERS.resource_api.create_project(root['id'], root) PROVIDERS.resource_api.create_project(leaf['id'], leaf) # Create 'non-inherited' and 'inherited' roles non_inherited_role = unit.new_role_ref(name='non-inherited') PROVIDERS.role_api.create_role( non_inherited_role['id'], non_inherited_role ) inherited_role = unit.new_role_ref(name='inherited') PROVIDERS.role_api.create_role(inherited_role['id'], inherited_role) return (root['id'], leaf['id'], non_inherited_role['id'], inherited_role['id']) def test_get_token_from_inherited_user_project_role_grants(self): # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Define root and leaf projects authentication data root_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=<PASSWORD>['password'], project_id=root_id) leaf_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=<PASSWORD>['password'], project_id=leaf_id) # Check the user cannot get a token on root nor leaf project self.v3_create_token(root_project_auth_data, expected_status=http.client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data, expected_status=http.client.UNAUTHORIZED) # Grant non-inherited role for user on leaf project non_inher_up_link = self.build_role_assignment_link( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_up_link) # Check the user can only get a token on leaf project self.v3_create_token(root_project_auth_data, expected_status=http.client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data) # Grant inherited role for user on root project inher_up_link = self.build_role_assignment_link( project_id=root_id, user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True) self.put(inher_up_link) # Check the user still can get a token only on leaf project self.v3_create_token(root_project_auth_data, expected_status=http.client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data) # Delete non-inherited grant self.delete(non_inher_up_link) # Check the inherited role still applies for leaf project self.v3_create_token(root_project_auth_data, expected_status=http.client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data) # Delete inherited grant self.delete(inher_up_link) # Check the user cannot get a token on leaf project anymore self.v3_create_token(leaf_project_auth_data, expected_status=http.client.UNAUTHORIZED) def test_get_token_from_inherited_group_project_role_grants(self): # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Create group and add user to it group = unit.new_group_ref(domain_id=self.domain['id']) group = PROVIDERS.identity_api.create_group(group) PROVIDERS.identity_api.add_user_to_group(self.user['id'], group['id']) # Define root and leaf projects authentication data root_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=<PASSWORD>['password'], project_id=root_id) leaf_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=<PASSWORD>['password'], project_id=leaf_id) # Check the user cannot get a token on root nor leaf project self.v3_create_token(root_project_auth_data, 
expected_status=http.client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data, expected_status=http.client.UNAUTHORIZED) # Grant non-inherited role for group on leaf project non_inher_gp_link = self.build_role_assignment_link( project_id=leaf_id, group_id=group['id'], role_id=non_inherited_role_id) self.put(non_inher_gp_link) # Check the user can only get a token on leaf project self.v3_create_token(root_project_auth_data, expected_status=http.client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data) # Grant inherited role for group on root project inher_gp_link = self.build_role_assignment_link( project_id=root_id, group_id=group['id'], role_id=inherited_role_id, inherited_to_projects=True) self.put(inher_gp_link) # Check the user still can get a token only on leaf project self.v3_create_token(root_project_auth_data, expected_status=http.client.UNAUTHORIZED) self.v3_create_token(leaf_project_auth_data) # Delete no-inherited grant self.delete(non_inher_gp_link) # Check the inherited role still applies for leaf project self.v3_create_token(leaf_project_auth_data) # Delete inherited grant self.delete(inher_gp_link) # Check the user cannot get a token on leaf project anymore self.v3_create_token(leaf_project_auth_data, expected_status=http.client.UNAUTHORIZED) def test_get_role_assignments_for_project_hierarchy(self): """Call ``GET /role_assignments``. Test Plan: - Create 2 roles - Create a hierarchy of projects with one root and one leaf project - Issue the URL to add a non-inherited user role to the root project - Issue the URL to add an inherited user role to the root project - Issue the URL to get all role assignments - this should return just 2 roles (non-inherited and inherited) in the root project. """ # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Grant non-inherited role non_inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_up_entity['links']['assignment']) # Grant inherited role inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True) self.put(inher_up_entity['links']['assignment']) # Get role assignments collection_url = '/role_assignments' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, resource_url=collection_url) # Assert that the user has non-inherited role on root project self.assertRoleAssignmentInListResponse(r, non_inher_up_entity) # Assert that the user has inherited role on root project self.assertRoleAssignmentInListResponse(r, inher_up_entity) # Assert that the user does not have non-inherited role on leaf project non_inher_up_entity = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user does not have inherited role on leaf project inher_up_entity['scope']['project']['id'] = leaf_id self.assertRoleAssignmentNotInListResponse(r, inher_up_entity) def test_get_effective_role_assignments_for_project_hierarchy(self): """Call ``GET /role_assignments?effective``. 
Test Plan: - Create 2 roles - Create a hierarchy of projects with one root and one leaf project - Issue the URL to add a non-inherited user role to the root project - Issue the URL to add an inherited user role to the root project - Issue the URL to get effective role assignments - this should return 1 role (non-inherited) on the root project and 1 role (inherited) on the leaf project. """ # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Grant non-inherited role non_inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_up_entity['links']['assignment']) # Grant inherited role inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True) self.put(inher_up_entity['links']['assignment']) # Get effective role assignments collection_url = '/role_assignments?effective' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, resource_url=collection_url) # Assert that the user has non-inherited role on root project self.assertRoleAssignmentInListResponse(r, non_inher_up_entity) # Assert that the user does not have inherited role on root project self.assertRoleAssignmentNotInListResponse(r, inher_up_entity) # Assert that the user does not have non-inherited role on leaf project non_inher_up_entity = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user has inherited role on leaf project inher_up_entity['scope']['project']['id'] = leaf_id self.assertRoleAssignmentInListResponse(r, inher_up_entity) def test_project_id_specified_if_include_subtree_specified(self): """When using include_subtree, you must specify a project ID.""" r = self.get('/role_assignments?include_subtree=True', expected_status=http.client.BAD_REQUEST) error_msg = ("scope.project.id must be specified if include_subtree " "is also specified") self.assertEqual(error_msg, r.result['error']['message']) r = self.get('/role_assignments?scope.project.id&' 'include_subtree=True', expected_status=http.client.BAD_REQUEST) self.assertEqual(error_msg, r.result['error']['message']) def test_get_role_assignments_for_project_tree(self): """Get role_assignment?scope.project.id=X&include_subtree``. Test Plan: - Create 2 roles and a hierarchy of projects with one root and one leaf - Issue the URL to add a non-inherited user role to the root project and the leaf project - Issue the URL to get role assignments for the root project but not the subtree - this should return just the root assignment - Issue the URL to get role assignments for the root project and it's subtree - this should return both assignments - Check that explicitly setting include_subtree to False is the equivalent to not including it at all in the query. 
""" # Create default scenario root_id, leaf_id, non_inherited_role_id, unused_role_id = ( self._setup_hierarchical_projects_scenario()) # Grant non-inherited role to root and leaf projects non_inher_entity_root = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_entity_root['links']['assignment']) non_inher_entity_leaf = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_entity_leaf['links']['assignment']) # Without the subtree, we should get the one assignment on the # root project collection_url = ( '/role_assignments?scope.project.id=%(project)s' % { 'project': root_id}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url) self.assertThat(r.result['role_assignments'], matchers.HasLength(1)) self.assertRoleAssignmentInListResponse(r, non_inher_entity_root) # With the subtree, we should get both assignments collection_url = ( '/role_assignments?scope.project.id=%(project)s' '&include_subtree=True' % { 'project': root_id}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url) self.assertThat(r.result['role_assignments'], matchers.HasLength(2)) self.assertRoleAssignmentInListResponse(r, non_inher_entity_root) self.assertRoleAssignmentInListResponse(r, non_inher_entity_leaf) # With subtree=0, we should also only get the one assignment on the # root project collection_url = ( '/role_assignments?scope.project.id=%(project)s' '&include_subtree=0' % { 'project': root_id}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url) self.assertThat(r.result['role_assignments'], matchers.HasLength(1)) self.assertRoleAssignmentInListResponse(r, non_inher_entity_root) def test_get_effective_role_assignments_for_project_tree(self): """Get role_assignment ?project_id=X&include_subtree=True&effective``. 
Test Plan: - Create 2 roles and a hierarchy of projects with one root and 4 levels of child project - Issue the URL to add a non-inherited user role to the root project and a level 1 project - Issue the URL to add an inherited user role on the level 2 project - Issue the URL to get effective role assignments for the level 1 project and it's subtree - this should return a role (non-inherited) on the level 1 project and roles (inherited) on each of the level 2, 3 and 4 projects """ # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Add some extra projects to the project hierarchy level2 = unit.new_project_ref(domain_id=self.domain['id'], parent_id=leaf_id) level3 = unit.new_project_ref(domain_id=self.domain['id'], parent_id=level2['id']) level4 = unit.new_project_ref(domain_id=self.domain['id'], parent_id=level3['id']) PROVIDERS.resource_api.create_project(level2['id'], level2) PROVIDERS.resource_api.create_project(level3['id'], level3) PROVIDERS.resource_api.create_project(level4['id'], level4) # Grant non-inherited role to root (as a spoiler) and to # the level 1 (leaf) project non_inher_entity_root = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_entity_root['links']['assignment']) non_inher_entity_leaf = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_entity_leaf['links']['assignment']) # Grant inherited role to level 2 inher_entity = self.build_role_assignment_entity( project_id=level2['id'], user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True) self.put(inher_entity['links']['assignment']) # Get effective role assignments collection_url = ( '/role_assignments?scope.project.id=%(project)s' '&include_subtree=True&effective' % { 'project': leaf_id}) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url) # There should be three assignments returned in total self.assertThat(r.result['role_assignments'], matchers.HasLength(3)) # Assert that the user does not non-inherited role on root project self.assertRoleAssignmentNotInListResponse(r, non_inher_entity_root) # Assert that the user does have non-inherited role on leaf project self.assertRoleAssignmentInListResponse(r, non_inher_entity_leaf) # Assert that the user has inherited role on levels 3 and 4 inher_entity['scope']['project']['id'] = level3['id'] self.assertRoleAssignmentInListResponse(r, inher_entity) inher_entity['scope']['project']['id'] = level4['id'] self.assertRoleAssignmentInListResponse(r, inher_entity) def test_get_inherited_role_assignments_for_project_hierarchy(self): """Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``. Test Plan: - Create 2 roles - Create a hierarchy of projects with one root and one leaf project - Issue the URL to add a non-inherited user role to the root project - Issue the URL to add an inherited user role to the root project - Issue the URL to filter inherited to projects role assignments - this should return 1 role (inherited) on the root project. 
""" # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario()) # Grant non-inherited role non_inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.put(non_inher_up_entity['links']['assignment']) # Grant inherited role inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True) self.put(inher_up_entity['links']['assignment']) # Get inherited role assignments collection_url = ('/role_assignments' '?scope.OS-INHERIT:inherited_to=projects') r = self.get(collection_url) self.assertValidRoleAssignmentListResponse(r, resource_url=collection_url) # Assert that the user does not have non-inherited role on root project self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user has inherited role on root project self.assertRoleAssignmentInListResponse(r, inher_up_entity) # Assert that the user does not have non-inherited role on leaf project non_inher_up_entity = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id) self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user does not have inherited role on leaf project inher_up_entity['scope']['project']['id'] = leaf_id self.assertRoleAssignmentNotInListResponse(r, inher_up_entity) class ImpliedRolesTests(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin, unit.TestCase): def _create_role(self): """Call ``POST /roles``.""" ref = unit.new_role_ref() r = self.post('/roles', body={'role': ref}) return self.assertValidRoleResponse(r, ref) def test_list_implied_roles_none(self): self.prior = self._create_role() url = '/roles/%s/implies' % (self.prior['id']) response = self.get(url).json["role_inference"] self.head(url, expected_status=http.client.OK) self.assertEqual(self.prior['id'], response['prior_role']['id']) self.assertEqual(0, len(response['implies'])) def _create_implied_role(self, prior, implied): self.put('/roles/%s/implies/%s' % (prior['id'], implied['id']), expected_status=http.client.CREATED) def _delete_implied_role(self, prior, implied): self.delete('/roles/%s/implies/%s' % (prior['id'], implied['id'])) def _setup_prior_two_implied(self): self.prior = self._create_role() self.implied1 = self._create_role() self._create_implied_role(self.prior, self.implied1) self.implied2 = self._create_role() self._create_implied_role(self.prior, self.implied2) def _assert_expected_implied_role_response( self, expected_prior_id, expected_implied_ids): r = self.get('/roles/%s/implies' % expected_prior_id) response = r.json role_inference = response['role_inference'] self.assertEqual(expected_prior_id, role_inference['prior_role']['id']) prior_link = '/v3/roles/' + expected_prior_id + '/implies' self.assertThat(response['links']['self'], matchers.EndsWith(prior_link)) actual_implied_ids = [implied['id'] for implied in role_inference['implies']] self.assertItemsEqual(expected_implied_ids, actual_implied_ids) self.assertIsNotNone(role_inference['prior_role']['links']['self']) for implied in role_inference['implies']: self.assertIsNotNone(implied['links']['self']) def _assert_expected_role_inference_rule_response( self, expected_prior_id, expected_implied_id): url = '/roles/%s/implies/%s' % (expected_prior_id, expected_implied_id) response = self.get(url).json 
self.assertThat(response['links']['self'], matchers.EndsWith('/v3%s' % url)) role_inference = response['role_inference'] prior_role = role_inference['prior_role'] self.assertEqual(expected_prior_id, prior_role['id']) self.assertIsNotNone(prior_role['name']) self.assertThat(prior_role['links']['self'], matchers.EndsWith('/v3/roles/%s' % expected_prior_id)) implied_role = role_inference['implies'] self.assertEqual(expected_implied_id, implied_role['id']) self.assertIsNotNone(implied_role['name']) self.assertThat(implied_role['links']['self'], matchers.EndsWith( '/v3/roles/%s' % expected_implied_id)) def _assert_two_roles_implied(self): self._assert_expected_implied_role_response( self.prior['id'], [self.implied1['id'], self.implied2['id']]) self._assert_expected_role_inference_rule_response( self.prior['id'], self.implied1['id']) self._assert_expected_role_inference_rule_response( self.prior['id'], self.implied2['id']) def _assert_one_role_implied(self): self._assert_expected_implied_role_response( self.prior['id'], [self.implied1['id']]) self.get('/roles/%s/implies/%s' % (self.prior['id'], self.implied2['id']), expected_status=http.client.NOT_FOUND) def _assert_two_rules_defined(self): r = self.get('/role_inferences/') rules = r.result['role_inferences'] self.assertEqual(self.prior['id'], rules[0]['prior_role']['id']) self.assertEqual(2, len(rules[0]['implies'])) implied_ids = [implied['id'] for implied in rules[0]['implies']] implied_names = [implied['name'] for implied in rules[0]['implies']] self.assertIn(self.implied1['id'], implied_ids) self.assertIn(self.implied2['id'], implied_ids) self.assertIn(self.implied1['name'], implied_names) self.assertIn(self.implied2['name'], implied_names) def _assert_one_rule_defined(self): r = self.get('/role_inferences/') rules = r.result['role_inferences'] self.assertEqual(self.prior['id'], rules[0]['prior_role']['id']) self.assertEqual(self.implied1['id'], rules[0]['implies'][0]['id']) self.assertEqual(self.implied1['name'], rules[0]['implies'][0]['name']) self.assertEqual(1, len(rules[0]['implies'])) def test_list_all_rules(self): self._setup_prior_two_implied() self._assert_two_rules_defined() self._delete_implied_role(self.prior, self.implied2) self._assert_one_rule_defined() def test_CRD_implied_roles(self): self._setup_prior_two_implied() self._assert_two_roles_implied() self._delete_implied_role(self.prior, self.implied2) self._assert_one_role_implied() def _create_three_roles(self): self.role_list = [] for _ in range(3): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) self.role_list.append(role) def _create_test_domain_user_project(self): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.create_user(PROVIDERS.identity_api, domain_id=domain['id']) project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project['id'], project) return domain, user, project def _assign_top_role_to_user_on_project(self, user, project): PROVIDERS.assignment_api.add_role_to_user_and_project( user['id'], project['id'], self.role_list[0]['id']) def _build_effective_role_assignments_url(self, user): return '/role_assignments?effective&user.id=%(user_id)s' % { 'user_id': user['id']} def _assert_all_roles_in_assignment(self, response, user): # Now use the list role assignments api to check that all three roles # appear in the collection self.assertValidRoleAssignmentListResponse( response, expected_length=len(self.role_list), 
resource_url=self._build_effective_role_assignments_url(user)) def _assert_initial_assignment_in_effective(self, response, user, project): # The initial assignment should be there (the link url will be # generated and checked automatically since it matches the assignment) entity = self.build_role_assignment_entity( project_id=project['id'], user_id=user['id'], role_id=self.role_list[0]['id']) self.assertRoleAssignmentInListResponse(response, entity) def _assert_effective_role_for_implied_has_prior_in_links( self, response, user, project, prior_index, implied_index): # An effective role for an implied role will have the prior role # assignment in the links prior_link = '/prior_roles/%(prior)s/implies/%(implied)s' % { 'prior': self.role_list[prior_index]['id'], 'implied': self.role_list[implied_index]['id']} link = self.build_role_assignment_link( project_id=project['id'], user_id=user['id'], role_id=self.role_list[prior_index]['id']) entity = self.build_role_assignment_entity( link=link, project_id=project['id'], user_id=user['id'], role_id=self.role_list[implied_index]['id'], prior_link=prior_link) self.assertRoleAssignmentInListResponse(response, entity) def test_list_role_assignments_with_implied_roles(self): """Call ``GET /role_assignments`` with implied role grant. Test Plan: - Create a domain with a user and a project - Create 3 roles - Role 0 implies role 1 and role 1 implies role 2 - Assign the top role to the project - Issue the URL to check effective roles on project - this should return all 3 roles. - Check the links of the 3 roles indicate the prior role where appropriate """ (domain, user, project) = self._create_test_domain_user_project() self._create_three_roles() self._create_implied_role(self.role_list[0], self.role_list[1]) self._create_implied_role(self.role_list[1], self.role_list[2]) self._assign_top_role_to_user_on_project(user, project) response = self.get(self._build_effective_role_assignments_url(user)) r = response self._assert_all_roles_in_assignment(r, user) self._assert_initial_assignment_in_effective(response, user, project) self._assert_effective_role_for_implied_has_prior_in_links( response, user, project, 0, 1) self._assert_effective_role_for_implied_has_prior_in_links( response, user, project, 1, 2) def _create_named_role(self, name): role = unit.new_role_ref() role['name'] = name PROVIDERS.role_api.create_role(role['id'], role) return role def test_root_role_as_implied_role_forbidden(self): """Test root role is forbidden to be set as an implied role. Create 2 roles that are prohibited from being an implied role. Create 1 additional role which should be accepted as an implied role. Assure the prohibited role names cannot be set as an implied role. Assure the accepted role name which is not a member of the prohibited implied role list can be successfully set an implied role. 
""" prohibited_name1 = 'root1' prohibited_name2 = 'root2' accepted_name1 = 'implied1' prohibited_names = [prohibited_name1, prohibited_name2] self.config_fixture.config(group='assignment', prohibited_implied_role=prohibited_names) prior_role = self._create_role() prohibited_role1 = self._create_named_role(prohibited_name1) url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format( prior_role_id=prior_role['id'], implied_role_id=prohibited_role1['id']) self.put(url, expected_status=http.client.FORBIDDEN) prohibited_role2 = self._create_named_role(prohibited_name2) url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format( prior_role_id=prior_role['id'], implied_role_id=prohibited_role2['id']) self.put(url, expected_status=http.client.FORBIDDEN) accepted_role1 = self._create_named_role(accepted_name1) url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format( prior_role_id=prior_role['id'], implied_role_id=accepted_role1['id']) self.put(url, expected_status=http.client.CREATED) def test_trusts_from_implied_role(self): self._create_three_roles() self._create_implied_role(self.role_list[0], self.role_list[1]) self._create_implied_role(self.role_list[1], self.role_list[2]) self._assign_top_role_to_user_on_project(self.user, self.project) # Create a trustee and assign the prior role to her trustee = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) ref = unit.new_trust_ref( trustor_user_id=self.user['id'], trustee_user_id=trustee['id'], project_id=self.project['id'], role_ids=[self.role_list[0]['id']]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = r.result['trust'] # Only the role that was specified is in the trust, NOT implied roles self.assertEqual(self.role_list[0]['id'], trust['roles'][0]['id']) self.assertThat(trust['roles'], matchers.HasLength(1)) # Authenticate as the trustee auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'], trust_id=trust['id']) r = self.v3_create_token(auth_data) token = r.result['token'] self.assertThat(token['roles'], matchers.HasLength(len(self.role_list))) for role in token['roles']: self.assertIn(role, self.role_list) for role in self.role_list: self.assertIn(role, token['roles']) def test_trusts_from_domain_specific_implied_role(self): self._create_three_roles() # Overwrite the first role with a domain specific role role = unit.new_role_ref(domain_id=self.domain_id) self.role_list[0] = PROVIDERS.role_api.create_role(role['id'], role) self._create_implied_role(self.role_list[0], self.role_list[1]) self._create_implied_role(self.role_list[1], self.role_list[2]) self._assign_top_role_to_user_on_project(self.user, self.project) # Create a trustee and assign the prior role to her trustee = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) ref = unit.new_trust_ref( trustor_user_id=self.user['id'], trustee_user_id=trustee['id'], project_id=self.project['id'], role_ids=[self.role_list[0]['id']]) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = r.result['trust'] # Only the role that was specified is in the trust, NOT implied roles self.assertEqual(self.role_list[0]['id'], trust['roles'][0]['id']) self.assertThat(trust['roles'], matchers.HasLength(1)) # Authenticate as the trustee auth_data = self.build_authentication_request( user_id=trustee['id'], password=<PASSWORD>ee['password'], trust_id=trust['id']) r = self.v3_create_token(auth_data) token = r.result['token'] # The token should have the roles implies by the domain specific role, # 
but not the domain specific role itself. self.assertThat(token['roles'], matchers.HasLength(len(self.role_list) - 1)) for role in token['roles']: self.assertIn(role, self.role_list) for role in [self.role_list[1], self.role_list[2]]: self.assertIn(role, token['roles']) self.assertNotIn(self.role_list[0], token['roles']) def test_global_role_cannot_imply_domain_specific_role(self): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) domain_role_ref = unit.new_role_ref(domain_id=domain['id']) domain_role = PROVIDERS.role_api.create_role( domain_role_ref['id'], domain_role_ref ) global_role_ref = unit.new_role_ref() global_role = PROVIDERS.role_api.create_role( global_role_ref['id'], global_role_ref ) self.put('/roles/%s/implies/%s' % (global_role['id'], domain_role['id']), expected_status=http.client.FORBIDDEN) class DomainSpecificRoleTests(test_v3.RestfulTestCase, unit.TestCase): def setUp(self): def create_role(domain_id=None): """Call ``POST /roles``.""" ref = unit.new_role_ref(domain_id=domain_id) r = self.post( '/roles', body={'role': ref}) return self.assertValidRoleResponse(r, ref) super(DomainSpecificRoleTests, self).setUp() self.domainA = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domainA['id'], self.domainA) self.domainB = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domainB['id'], self.domainB) self.global_role1 = create_role() self.global_role2 = create_role() # Since there maybe other global roles already created, let's count # them, so we can ensure we can check subsequent list responses # are correct r = self.get('/roles') self.existing_global_roles = len(r.result['roles']) # And now create some domain specific roles self.domainA_role1 = create_role(domain_id=self.domainA['id']) self.domainA_role2 = create_role(domain_id=self.domainA['id']) self.domainB_role = create_role(domain_id=self.domainB['id']) def test_get_and_list_domain_specific_roles(self): # Check we can get a domain specific role r = self.get('/roles/%s' % self.domainA_role1['id']) self.assertValidRoleResponse(r, self.domainA_role1) # If we list without specifying a domain, we should only get global # roles back. 
r = self.get('/roles') self.assertValidRoleListResponse( r, expected_length=self.existing_global_roles) self.assertRoleInListResponse(r, self.global_role1) self.assertRoleInListResponse(r, self.global_role2) self.assertRoleNotInListResponse(r, self.domainA_role1) self.assertRoleNotInListResponse(r, self.domainA_role2) self.assertRoleNotInListResponse(r, self.domainB_role) # Now list those in domainA, making sure that's all we get back r = self.get('/roles?domain_id=%s' % self.domainA['id']) self.assertValidRoleListResponse(r, expected_length=2) self.assertRoleInListResponse(r, self.domainA_role1) self.assertRoleInListResponse(r, self.domainA_role2) def test_update_domain_specific_roles(self): self.domainA_role1['name'] = uuid.uuid4().hex self.patch('/roles/%(role_id)s' % { 'role_id': self.domainA_role1['id']}, body={'role': self.domainA_role1}) r = self.get('/roles/%s' % self.domainA_role1['id']) self.assertValidRoleResponse(r, self.domainA_role1) def test_delete_domain_specific_roles(self): # Check delete only removes that one domain role self.delete('/roles/%(role_id)s' % { 'role_id': self.domainA_role1['id']}) self.get('/roles/%s' % self.domainA_role1['id'], expected_status=http.client.NOT_FOUND) # Now re-list those in domainA, making sure there's only one left r = self.get('/roles?domain_id=%s' % self.domainA['id']) self.assertValidRoleListResponse(r, expected_length=1) self.assertRoleInListResponse(r, self.domainA_role2) def test_same_domain_assignment(self): user = unit.create_user(PROVIDERS.identity_api, domain_id=self.domainA['id']) projectA = unit.new_project_ref(domain_id=self.domainA['id']) PROVIDERS.resource_api.create_project(projectA['id'], projectA) PROVIDERS.assignment_api.create_grant( self.domainA_role1['id'], user_id=user['id'], project_id=projectA['id'] ) def test_cross_domain_assignment_valid(self): user = unit.create_user(PROVIDERS.identity_api, domain_id=self.domainB['id']) projectA = unit.new_project_ref(domain_id=self.domainA['id']) PROVIDERS.resource_api.create_project(projectA['id'], projectA) # Positive: a role on domainA can be assigned to a user from domainB # but only for use on a project from domainA PROVIDERS.assignment_api.create_grant( self.domainA_role1['id'], user_id=user['id'], project_id=projectA['id'] ) def test_cross_domain_assignment_invalid(self): user = unit.create_user(PROVIDERS.identity_api, domain_id=self.domainB['id']) projectB = unit.new_project_ref(domain_id=self.domainB['id']) PROVIDERS.resource_api.create_project(projectB['id'], projectB) # Negative: a role on domainA can be assigned to a user from domainB # only for a project from domainA self.assertRaises(exception.DomainSpecificRoleMismatch, PROVIDERS.assignment_api.create_grant, self.domainA_role1['id'], user_id=user['id'], project_id=projectB['id']) def test_cross_domain_implied_roles_authentication(self): # Create a user in domainB user = unit.create_user(PROVIDERS.identity_api, domain_id=self.domainB['id']) # Create project in domainA projectA = unit.new_project_ref(domain_id=self.domainA['id']) PROVIDERS.resource_api.create_project(projectA['id'], projectA) # Now we create an implied rule from a role in domainA to a # role in domainB self.put('/roles/%s/implies/%s' % (self.domainA_role1['id'], self.domainB_role['id']), expected_status=http.client.CREATED) # A role in domainA can be assigned to a user from domainB # only for a project from domainA PROVIDERS.assignment_api.create_grant( self.domainA_role1['id'], user_id=user['id'], project_id=projectA['id'] ) # The role assignments 
should return an empty list since domain roles # can only be used to imply another roles assignments = PROVIDERS.assignment_api.list_role_assignments( user_id=user['id'], effective=True) self.assertEqual([], assignments) # This also means we can't authenticate using the existing assignment auth_body = self.build_authentication_request( user_id=user['id'], password=<PASSWORD>['password'], project_id=projectA['id']) self.post('/auth/tokens', body=auth_body, expected_status=http.client.UNAUTHORIZED) class ListUserProjectsTestCase(test_v3.RestfulTestCase): """Test for /users/<user>/projects.""" def load_sample_data(self): # do not load base class's data, keep it focused on the tests self.auths = [] self.domains = [] self.projects = [] self.roles = [] self.users = [] root_domain = unit.new_domain_ref( id=resource_base.NULL_DOMAIN_ID, name=resource_base.NULL_DOMAIN_ID ) self.resource_api.create_domain(resource_base.NULL_DOMAIN_ID, root_domain) # Create 3 sets of domain, roles, projects, and users to demonstrate # the right user's data is loaded and only projects they can access # are returned. for _ in range(3): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) PROVIDERS.assignment_api.create_grant( role['id'], user_id=user['id'], domain_id=domain['id'] ) project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( role['id'], user_id=user['id'], project_id=project['id'] ) auth = self.build_authentication_request( user_id=user['id'], password=<PASSWORD>['password'], domain_id=domain['id']) self.auths.append(auth) self.domains.append(domain) self.projects.append(project) self.roles.append(role) self.users.append(user) def test_list_head_all(self): for i in range(len(self.users)): user = self.users[i] auth = self.auths[i] url = '/users/%s/projects' % user['id'] result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, len(projects_result)) self.assertEqual(self.projects[i]['id'], projects_result[0]['id']) self.head(url, auth=auth, expected_status=http.client.OK) def test_list_enabled(self): for i in range(len(self.users)): user = self.users[i] auth = self.auths[i] # There are no disabled projects url = '/users/%s/projects?enabled=True' % user['id'] result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, len(projects_result)) self.assertEqual(self.projects[i]['id'], projects_result[0]['id']) def test_list_disabled(self): for i in range(len(self.users)): user = self.users[i] auth = self.auths[i] project = self.projects[i] # There are no disabled projects url = '/users/%s/projects?enabled=False' % user['id'] result = self.get(url, auth=auth) self.assertEqual(0, len(result.json['projects'])) # disable this one and check again project['enabled'] = False PROVIDERS.resource_api.update_project(project['id'], project) result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, len(projects_result)) self.assertEqual(self.projects[i]['id'], projects_result[0]['id']) def test_list_by_domain_id(self): for i in range(len(self.users)): user = self.users[i] domain = self.domains[i] auth = self.auths[i] # Try looking for projects with a non-existent domain_id url = '/users/%s/projects?domain_id=%s' % (user['id'], 
uuid.uuid4().hex) result = self.get(url, auth=auth) self.assertEqual(0, len(result.json['projects'])) # Now try a valid one url = '/users/%s/projects?domain_id=%s' % (user['id'], domain['id']) result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, len(projects_result)) self.assertEqual(self.projects[i]['id'], projects_result[0]['id']) # FIXME(lbragstad): These tests contain system-level API calls, which means # they will log a warning message if they are called with a project-scoped # token, regardless of the role assignment on the project. We need to fix # them by using a proper system-scoped admin token to make the call instead # of a project scoped token. class UserSystemRoleAssignmentTestCase(test_v3.RestfulTestCase, SystemRoleAssignmentMixin): def test_assign_system_role_to_user(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = ( '/system/users/%(user_id)s/roles/%(role_id)s' % { 'user_id': self.user['id'], 'role_id': system_role_id } ) self.put(member_url) # validate the role assignment self.head(member_url) # list system roles collection_url = ( '/system/users/%(user_id)s/roles' % {'user_id': self.user['id']} ) roles = self.get(collection_url).json_body['roles'] self.assertEqual(len(roles), 1) self.assertEqual(roles[0]['id'], system_role_id) self.head(collection_url, expected_status=http.client.OK) response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % { 'user_id': self.user['id'] } ) self.assertValidRoleAssignmentListResponse(response) def test_list_role_assignments_for_user_returns_all_assignments(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = '/system/users/%(user_id)s/roles/%(role_id)s' % { 'user_id': self.user['id'], 'role_id': system_role_id } self.put(member_url) # the response should contain one role assignment for the system role # and one for a role that was setup during setUp(). 
response = self.get( '/role_assignments?user.id=%(user_id)s' % { 'user_id': self.user['id'] } ) self.assertValidRoleAssignmentListResponse(response, expected_length=2) def test_list_system_roles_for_user_returns_none_without_assignment(self): # list system roles for user collection_url = '/system/users/%(user_id)s/roles' % { 'user_id': self.user['id'] } response = self.get(collection_url) # assert that the user doesn't have any system role assignments, which # is denoted by an empty list self.assertEqual(response.json_body['roles'], []) response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % { 'user_id': self.user['id'] } ) self.assertEqual(len(response.json_body['role_assignments']), 0) self.assertValidRoleAssignmentListResponse(response) def test_list_system_roles_for_user_does_not_return_project_roles(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = '/system/users/%(user_id)s/roles/%(role_id)s' % { 'user_id': self.user['id'], 'role_id': system_role_id } self.put(member_url) # list project role assignments and save the role id of that # assignment, this assignment was created during setUp response = self.get( '/projects/%(project_id)s/users/%(user_id)s/roles' % { 'project_id': self.project['id'], 'user_id': self.user['id'] } ) self.assertEqual(len(response.json_body['roles']), 1) project_role_id = response.json_body['roles'][0]['id'] # list system role assignments collection_url = '/system/users/%(user_id)s/roles' % { 'user_id': self.user['id'] } response = self.get(collection_url) # assert the project role assignment is not in the system role # assignments for role in response.json_body['roles']: self.assertNotEqual(role['id'], project_role_id) # make sure the role_assignment API filters correctly based on system # scope response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % { 'user_id': self.user['id'] } ) self.assertEqual(len(response.json_body['role_assignments']), 1) system_assignment = response.json_body['role_assignments'][0] self.assertEqual(system_assignment['role']['id'], system_role_id) self.assertTrue(system_assignment['scope']['system']['all']) # make sure the role_assignment API doesn't include the system role # assignment when we filter based on project path = ( '/role_assignments?scope.project.id=%(project_id)s&' 'user.id=%(user_id)s' ) % {'project_id': self.project['id'], 'user_id': self.user['id']} response = self.get(path) self.assertEqual(len(response.json_body['role_assignments']), 1) project_assignment = response.json_body['role_assignments'][0] self.assertEqual(project_assignment['role']['id'], project_role_id) def test_list_system_roles_for_user_does_not_return_domain_roles(self): system_role_id = self._create_new_role() domain_role_id = self._create_new_role() # assign a role to the user on a domain domain_member_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles/%(role_id)s' % { 'domain_id': self.user['domain_id'], 'user_id': self.user['id'], 'role_id': domain_role_id } ) self.put(domain_member_url) # assign the user a role on the system member_url = '/system/users/%(user_id)s/roles/%(role_id)s' % { 'user_id': self.user['id'], 'role_id': system_role_id } self.put(member_url) # list domain role assignments response = self.get( '/domains/%(domain_id)s/users/%(user_id)s/roles' % { 'domain_id': self.user['domain_id'], 'user_id': self.user['id'] } ) self.assertEqual(len(response.json_body['roles']), 1) # list system role assignments collection_url = 
'/system/users/%(user_id)s/roles' % { 'user_id': self.user['id'] } response = self.get(collection_url) # assert the domain role assignment is not in the system role # assignments for role in response.json_body['roles']: self.assertNotEqual(role['id'], domain_role_id) # make sure the role_assignment API filters correctly based on system # scope response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % { 'user_id': self.user['id'] } ) self.assertEqual(len(response.json_body['role_assignments']), 1) system_assignment = response.json_body['role_assignments'][0] self.assertEqual(system_assignment['role']['id'], system_role_id) self.assertTrue(system_assignment['scope']['system']['all']) # make sure the role_assignment API doesn't include the system role # assignment when we filter based on domain path = ( '/role_assignments?scope.domain.id=%(domain_id)s&' 'user.id=%(user_id)s' ) % {'domain_id': self.user['domain_id'], 'user_id': self.user['id']} response = self.get(path) self.assertEqual(len(response.json_body['role_assignments']), 1) domain_assignment = response.json_body['role_assignments'][0] self.assertEqual(domain_assignment['role']['id'], domain_role_id) def test_check_user_has_system_role_when_assignment_exists(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = '/system/users/%(user_id)s/roles/%(role_id)s' % { 'user_id': self.user['id'], 'role_id': system_role_id } self.put(member_url) # check the user has the system role assignment self.head(member_url) def test_check_user_does_not_have_system_role_without_assignment(self): system_role_id = self._create_new_role() # check the user does't have the system role assignment member_url = '/system/users/%(user_id)s/roles/%(role_id)s' % { 'user_id': self.user['id'], 'role_id': system_role_id } self.head(member_url, expected_status=http.client.NOT_FOUND) response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % { 'user_id': self.user['id'] } ) self.assertEqual(len(response.json_body['role_assignments']), 0) self.assertValidRoleAssignmentListResponse(response) def test_unassign_system_role_from_user(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = '/system/users/%(user_id)s/roles/%(role_id)s' % { 'user_id': self.user['id'], 'role_id': system_role_id } self.put(member_url) # ensure the user has the role assignment self.head(member_url) response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % { 'user_id': self.user['id'] } ) self.assertEqual(len(response.json_body['role_assignments']), 1) self.assertValidRoleAssignmentListResponse(response) # remove the system role assignment from the user self.delete(member_url) # ensure the user doesn't have any system role assignments collection_url = '/system/users/%(user_id)s/roles' % { 'user_id': self.user['id'] } response = self.get(collection_url) self.assertEqual(len(response.json_body['roles']), 0) response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % { 'user_id': self.user['id'] } ) self.assertValidRoleAssignmentListResponse(response, expected_length=0) def test_query_for_system_scope_and_domain_scope_fails(self): # When asking for assignments and providing query parameters, we # shouldn't be able to ask for two different types of scope. This is # also true for project + domain scope. 
path = ( '/role_assignments?scope.system=all' '&scope.domain.id=%(domain_id)s' ) % {'domain_id': self.domain_id} self.get(path, expected_status=http.client.BAD_REQUEST) def test_query_for_system_scope_and_project_scope_fails(self): # When asking for assignments and providing query parameters, we # shouldn't be able to ask for two different types of scope. This is # also true for project + domain scope. path = ( '/role_assignments?scope.system=all' '&scope.project.id=%(project_id)s' ) % {'project_id': self.project_id} self.get(path, expected_status=http.client.BAD_REQUEST) def test_query_for_role_id_does_not_return_system_user_roles(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = '/system/users/%(user_id)s/roles/%(role_id)s' % { 'user_id': self.user['id'], 'role_id': system_role_id } self.put(member_url) # Make sure we only get one role assignment back since the system role # assignment shouldn't be returned. path = ( '/role_assignments?role.id=%(role_id)s&user.id=%(user_id)s' ) % {'role_id': self.role_id, 'user_id': self.user['id']} response = self.get(path) self.assertValidRoleAssignmentListResponse(response, expected_length=1) # FIXME(lbragstad): These tests contain system-level API calls, which means # they will log a warning message if they are called with a project-scoped # token, regardless of the role assignment on the project. We need to fix # them by using a proper system-scoped admin token to make the call instead # of a project scoped token. class GroupSystemRoleAssignmentTestCase(test_v3.RestfulTestCase, SystemRoleAssignmentMixin): def test_assign_system_role_to_group(self): system_role_id = self._create_new_role() group = self._create_group() # assign the role to the group globally member_url = '/system/groups/%(group_id)s/roles/%(role_id)s' % { 'group_id': group['id'], 'role_id': system_role_id } self.put(member_url) # validate the role assignment self.head(member_url) # list global roles collection_url = '/system/groups/%(group_id)s/roles' % { 'group_id': group['id'] } roles = self.get(collection_url).json_body['roles'] self.assertEqual(len(roles), 1) self.assertEqual(roles[0]['id'], system_role_id) self.head(collection_url, expected_status=http.client.OK) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % { 'group_id': group['id'] } ) self.assertValidRoleAssignmentListResponse(response, expected_length=1) self.assertEqual( response.json_body['role_assignments'][0]['role']['id'], system_role_id ) def test_assign_system_role_to_non_existant_group_fails(self): system_role_id = self._create_new_role() group_id = uuid.uuid4().hex # assign the role to the group globally member_url = '/system/groups/%(group_id)s/roles/%(role_id)s' % { 'group_id': group_id, 'role_id': system_role_id } self.put(member_url, expected_status=http.client.NOT_FOUND) def test_list_role_assignments_for_group_returns_all_assignments(self): system_role_id = self._create_new_role() group = self._create_group() # assign the role to the group globally and on a single project member_url = '/system/groups/%(group_id)s/roles/%(role_id)s' % { 'group_id': group['id'], 'role_id': system_role_id } self.put(member_url) member_url = ( '/projects/%(project_id)s/groups/%(group_id)s/' 'roles/%(role_id)s' ) % { 'project_id': self.project_id, 'group_id': group['id'], 'role_id': system_role_id } self.put(member_url) # make sure both assignments exist in the response, there should be two response = self.get( 
'/role_assignments?group.id=%(group_id)s' % { 'group_id': group['id'] } ) self.assertValidRoleAssignmentListResponse(response, expected_length=2) def test_list_system_roles_for_group_returns_none_without_assignment(self): group = self._create_group() # list global roles for group collection_url = '/system/groups/%(group_id)s/roles' % { 'group_id': group['id'] } response = self.get(collection_url) # assert that the group doesn't have any system role assignments, which # is denoted by an empty list self.assertEqual(response.json_body['roles'], []) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % { 'group_id': group['id'] } ) self.assertValidRoleAssignmentListResponse(response, expected_length=0) def test_list_system_roles_for_group_does_not_return_project_roles(self): system_role_id = self._create_new_role() project_role_id = self._create_new_role() group = self._create_group() # assign the group a role on the system and a role on a project member_url = '/system/groups/%(group_id)s/roles/%(role_id)s' % { 'group_id': group['id'], 'role_id': system_role_id } self.put(member_url) member_url = ( '/projects/%(project_id)s/groups/%(group_id)s/' 'roles/%(role_id)s' ) % { 'project_id': self.project_id, 'group_id': group['id'], 'role_id': project_role_id } self.put(member_url) # list system role assignments collection_url = '/system/groups/%(group_id)s/roles' % { 'group_id': group['id'] } response = self.get(collection_url) # assert the project role assignment is not in the system role # assignments for role in response.json_body['roles']: self.assertNotEqual(role['id'], project_role_id) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % { 'group_id': group['id'] } ) self.assertValidRoleAssignmentListResponse(response, expected_length=1) def test_list_system_roles_for_group_does_not_return_domain_roles(self): system_role_id = self._create_new_role() domain_role_id = self._create_new_role() group = self._create_group() # assign a role to the group on a domain domain_member_url = ( '/domains/%(domain_id)s/groups/%(group_id)s/' 'roles/%(role_id)s' % { 'domain_id': group['domain_id'], 'group_id': group['id'], 'role_id': domain_role_id } ) self.put(domain_member_url) # assign the group a role on the system member_url = '/system/groups/%(group_id)s/roles/%(role_id)s' % { 'group_id': group['id'], 'role_id': system_role_id } self.put(member_url) # list domain role assignments response = self.get( '/domains/%(domain_id)s/groups/%(group_id)s/roles' % { 'domain_id': group['domain_id'], 'group_id': group['id'] } ) self.assertEqual(len(response.json_body['roles']), 1) # list system role assignments collection_url = '/system/groups/%(group_id)s/roles' % { 'group_id': group['id'] } response = self.get(collection_url) # assert the domain role assignment is not in the system role # assignments for role in response.json_body['roles']: self.assertNotEqual(role['id'], domain_role_id) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % { 'group_id': group['id'] } ) self.assertValidRoleAssignmentListResponse(response, expected_length=1) def test_check_group_has_system_role_when_assignment_exists(self): system_role_id = self._create_new_role() group = self._create_group() # assign the group a role on the system member_url = '/system/groups/%(group_id)s/roles/%(role_id)s' % { 'group_id': group['id'], 'role_id': system_role_id } self.put(member_url) # check the group has the system role assignment self.head(member_url) response = 
self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % { 'group_id': group['id'] } ) self.assertValidRoleAssignmentListResponse(response, expected_length=1) self.assertEqual( response.json_body['role_assignments'][0]['role']['id'], system_role_id ) def test_check_group_does_not_have_system_role_without_assignment(self): system_role_id = self._create_new_role() group = self._create_group() # check the group does't have the system role assignment member_url = '/system/groups/%(group_id)s/roles/%(role_id)s' % { 'group_id': group['id'], 'role_id': system_role_id } self.head(member_url, expected_status=http.client.NOT_FOUND) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % { 'group_id': group['id'] } ) self.assertValidRoleAssignmentListResponse(response, expected_length=0) def test_unassign_system_role_from_group(self): system_role_id = self._create_new_role() group = self._create_group() # assign the group a role on the system member_url = '/system/groups/%(group_id)s/roles/%(role_id)s' % { 'group_id': group['id'], 'role_id': system_role_id } self.put(member_url) # ensure the group has the role assignment self.head(member_url) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % { 'group_id': group['id'] } ) self.assertEqual(len(response.json_body['role_assignments']), 1) self.assertValidRoleAssignmentListResponse(response) # remove the system role assignment from the group self.delete(member_url) # ensure the group doesn't have any system role assignments collection_url = '/system/groups/%(group_id)s/roles' % { 'group_id': group['id'] } response = self.get(collection_url) self.assertEqual(len(response.json_body['roles']), 0) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % { 'group_id': group['id'] } ) self.assertValidRoleAssignmentListResponse(response, expected_length=0) def test_query_for_role_id_does_not_return_system_group_roles(self): system_role_id = self._create_new_role() group = self._create_group() # assign the group a role on the system member_url = '/system/groups/%(group_id)s/roles/%(role_id)s' % { 'group_id': group['id'], 'role_id': system_role_id } self.put(member_url) # assign the group a role on the system member_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles/%(role_id)s' % {'project_id': self.project_id, 'group_id': group['id'], 'role_id': self.role_id} ) self.put(member_url) # Make sure we only get one role assignment back since the system role # assignment shouldn't be returned. path = ( '/role_assignments?role.id=%(role_id)s&group.id=%(group_id)s' ) % {'role_id': self.role_id, 'group_id': group['id']} response = self.get(path) self.assertValidRoleAssignmentListResponse(response, expected_length=1)
1.851563
2
setup.py
zhanghang1989/notedown
0
102
<gh_stars>0 from setuptools import setup # create __version__ exec(open('./_version.py').read()) setup( name="notedown", version=__version__, description="Convert markdown to IPython notebook.", author="<NAME>", author_email='<EMAIL>', url='http://github.com/aaren/notedown', install_requires=['ipython', ], entry_points={ 'console_scripts': [ 'notedown = notedown:cli', ], } )
1.375
1
multithreaded_webcrawler.py
the-muses-ltd/Multithreaded-Webcrawler-Cassandra-
0
103
# This is a reusable webcrawler architecture that can be adapted to scrape any website.
# RESULTS:
# Roughly 24 seconds per thousand courses scraped for ThreadPoolExecutor vs 63s for unthreaded script.
# This is a very basic implementation of multithreading in order to show the proof of concept, but is a good base to build off of.

import requests
from bs4 import BeautifulSoup
import csv
from concurrent.futures import ProcessPoolExecutor, as_completed, ThreadPoolExecutor
import time
import logging
from mitopencourseware_crawler_worker import mit_crawler


def courses_spider(max_pages):
    data_to_csv = []  # holds all data to send to csv
    print("Webcrawler workers have started, please wait while we finish crawling...")
    # remove max pages loop (unnecessary)
    page = 1
    while page <= max_pages:
        url = 'https://ocw.mit.edu/courses/'
        source_code = requests.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, 'html.parser')
        # Multithread only the work:
        # Tuning is required to find the most efficient amount of workers in the thread pool.
        with ThreadPoolExecutor(max_workers=30) as executor:
            start = time.time()
            futures = [
                executor.submit(work, link)
                for link in soup.findAll('h4', {'class': 'course_title'}, limit=100)
            ]
            data_to_csv = []
            for result in as_completed(futures):
                data_to_csv.append(result.result())
            end = time.time()
            print("Time Taken to complete: {:.6f}s".format(end - start))
            print("Courses extracted: ", len(data_to_csv))
        page += 1
    export_to_csv(data_to_csv)


def work(link):
    # replace this function with the specific crawler you want to use:
    return mit_crawler(link)


# Exports data to a formatted csv file, this will be replaced with multithreaded API calls to the Cassandra Prisma Database
# or on the cloud in production, it will be sent to the S3 temporary database to be picked up by the AWS Lambda function which will push it to the Cassandra Database
def export_to_csv(csv_data):
    with open('web_crawl_data.csv', mode='w') as csv_file:
        field_names = ['Title', 'URL extension', 'External Website Logo', 'URL(href)', 'Description', 'Course logo URL']
        csv_writer = csv.DictWriter(csv_file, fieldnames=field_names)  # delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL
        csv_writer.writeheader()
        for course in csv_data:
            course_data = {
                'Title': course[0],
                'URL extension': course[1],
                'External Website Logo': course[2],
                'URL(href)': course[3],
                'Description': course[4],
                'Course logo URL': course[5],
            }
            csv_writer.writerow(course_data)
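# --- Editor's note: a hedged usage sketch appended for illustration; it is not part
# of the original project. It shows how the spider is driven and how the per-link
# work() function is the only piece you would swap to target a different site.
# `example_site_crawler` and its field layout are assumptions, not the project's API.
def example_site_crawler(link):
    # Hypothetical worker: pull the title and href out of the matched <h4> tag.
    anchor = link.find('a')
    title = link.text.strip()
    href = anchor.get('href') if anchor else ''
    # Return the six fields export_to_csv() expects, padding the ones we skip.
    return [title, href, '', href, '', '']


if __name__ == '__main__':
    # Crawl a single listing page; to adapt the architecture, point work() at
    # example_site_crawler (or your own worker) instead of mit_crawler.
    courses_spider(max_pages=1)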
3.28125
3
genyrator/entities/Template.py
jumblesale/genyrator
1
104
from typing import List, Optional, NewType, Tuple, NamedTuple, Type import attr from jinja2 import Template as JinjaTemplate, StrictUndefined from genyrator.entities.Entity import Entity from genyrator.path import create_relative_path OutPath = NewType('OutPath', Tuple[List[str], str]) Import = NamedTuple('Import', [('module_name', str), ('imports', List[str]), ]) @attr.s class Template(object): template_name: str = attr.ib() template_file_name: str = attr.ib() template_file_path: List[str] = attr.ib() relative_path: List[str] = attr.ib() out_path: Optional[OutPath] = attr.ib() def create_template(self): path = create_relative_path( [*self.template_file_path, self.template_file_name] ) with open(path) as f: template = JinjaTemplate(f.read(), undefined=StrictUndefined) return template def render(self): return self.create_template().render(template=self) def create_template( constructor, template_path: Optional[List[str]] = None, out_path: Optional[OutPath] = None, **kwargs, ) -> Template: relative_path = template_path[0:-1] path = ['genyrator', 'templates'] + relative_path template_name = template_path[-1] return constructor( template_name=template_name, template_file_name='{}.j2'.format(template_name), template_file_path=path, out_path=out_path, relative_path=relative_path, **kwargs, ) @attr.s class RootInit(Template): db_import_path: str = attr.ib() module_name: str = attr.ib() @attr.s class RootSchema(Template): module_name: str = attr.ib() entities: List[Entity] = attr.ib() @attr.s class ConvertDict(Template): module_name: str = attr.ib() @attr.s class SQLAlchemyModel(Template): module_name: str = attr.ib() db_import_path: str = attr.ib() entity: Entity = attr.ib() @attr.s class ModelToDict(Template): module_name: str = attr.ib() @attr.s class Config(Template): module_name: str = attr.ib() @attr.s class SQLAlchemyModelInit(Template): module_name: str = attr.ib() db_import_path: str = attr.ib() imports: List[Import] = attr.ib() @attr.s class RestplusModel(Template): entity: Entity = attr.ib() @attr.s class Resource(Template): module_name: str = attr.ib() db_import_path: str = attr.ib() entity: Entity = attr.ib() restplus_template: str = attr.ib() TypeOption: Type = attr.ib() @attr.s class ResourcesInit(Template): entities: List[Entity] = attr.ib() module_name: str = attr.ib() api_name: str = attr.ib() api_description: str = attr.ib() @attr.s class DomainModel(Template): entity: Entity = attr.ib() module_name: str = attr.ib() def sqlalchemy_model_imports(self): return list(set([ rel.target_entity_class_name for rel in self.entity.relationships ])) @attr.s class ConvertProperties(Template): module_name: str = attr.ib() @attr.s class ConvertModels(Template): module_name: str = attr.ib() @attr.s class JoinEntities(Template): module_name: str = attr.ib() @attr.s class ConvertDictToMarshmallow(Template): module_name: str = attr.ib() db_import_path: str = attr.ib() @attr.s class Fixture(Template): db_import_path: str = attr.ib() module_name: str = attr.ib() entity: Entity = attr.ib()
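# Editor's note: a hedged usage sketch, not part of the original module and never
# called. It shows how the create_template() factory maps a template_path onto a .j2
# file under genyrator/templates and passes the remaining keyword arguments to the
# attrs constructor. The ['core', 'convert_dict'] path and 'example_app' name are
# invented for illustration; render() only succeeds if that .j2 file exists on disk.
def _example_template_usage():
    convert_dict = create_template(
        ConvertDict,
        template_path=['core', 'convert_dict'],  # -> genyrator/templates/core/convert_dict.j2 (assumed)
        out_path=None,
        module_name='example_app',
    )
    # render() exposes the object to Jinja2 as `template`, so the .j2 file can
    # reference fields like {{ template.module_name }}.
    return convert_dict.render()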
2.234375
2
MIDI Remote Scripts/Push2/mode_collector.py
aarkwright/ableton_devices
0
105
# uncompyle6 version 3.3.5 # Python bytecode 2.7 (62211) # Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)] # Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\Push2\mode_collector.py # Compiled at: 2018-11-30 15:48:11 from __future__ import absolute_import, print_function, unicode_literals from ableton.v2.base import listenable_property, listens, EventObject class ModeCollector(EventObject): def __init__(self, main_modes=None, mix_modes=None, global_mix_modes=None, device_modes=None, *a, **k): super(ModeCollector, self).__init__(*a, **k) self._main_modes = main_modes self._mix_modes = mix_modes self._global_mix_modes = global_mix_modes self._device_modes = device_modes self._on_selected_main_mode_changed.subject = main_modes self._on_selected_mix_mode_changed.subject = mix_modes self._on_selected_global_mix_mode_changed.subject = global_mix_modes self._on_selected_device_mode_changed.subject = device_modes @listenable_property def main_mode(self): return self._main_modes.selected_mode @listens(b'selected_mode') def _on_selected_main_mode_changed(self, mode): self.notify_main_mode() @listenable_property def mix_mode(self): return self._mix_modes.selected_mode @listens(b'selected_mode') def _on_selected_mix_mode_changed(self, mode): self.notify_mix_mode() @listenable_property def global_mix_mode(self): return self._global_mix_modes.selected_mode @listens(b'selected_mode') def _on_selected_global_mix_mode_changed(self, mode): self.notify_global_mix_mode() @listenable_property def device_mode(self): return self._device_modes.selected_mode @listens(b'selected_mode') def _on_selected_device_mode_changed(self, mode): self.notify_device_mode()
1.859375
2
src/topicModel.py
daidaotong/SingleView
0
106
<filename>src/topicModel.py from gensim import corpora, models, similarities, matutils,utils from gensim.models import KeyedVectors import numpy as np #Word2vec Experiment testString = ['PAST_MEDICAL_HISTORY','PAST_SURGICAL_HISTORY','PHYSICAL_EXAMINATION'] ''' word_vectors = KeyedVectors.load_word2vec_format('~/Downloads/GoogleNews-vectors-negative300.bin', binary=True) #model.save("file.txt") print word_vectors.most_similar(positive=['woman', 'king'], negative=['man']) print "******************************************************" print word_vectors.similarity('woman', 'man') #print word_vectors.most_similar(positive=['san_francisco']) print word_vectors.most_similar(positive=['SURGICAL']) #word_vectors.similarity(testString[0],testString[1]) ''' a=[1,4,3,6,3,6] print a[:-1] #print zip(a[:-1],a[1:]) print np.random.randn(3, 2)
3.078125
3
src/bots/test/test_inputs.py
drewbitt/lightnovel-crawler
1
107
<reponame>drewbitt/lightnovel-crawler from base64 import decodestring as b64decode allowed_failures = [ 'https://ranobelib.me/', 'https://www.aixdzs.com/', 'https://webnovelindonesia.com/', b64decode("aHR0cHM6Ly9jb21yYWRlbWFvLmNvbS8=".encode()).decode() ] test_user_inputs = { b64decode("aHR0cHM6Ly9jb21yYWRlbWFvLmNvbS8=".encode()).decode(): [ b64decode( "aHR0cHM6Ly9jb21yYWRlbWFvLmNvbS9ub3ZlbC90c3VydWdpLW5vLWpvb3UtdG8tcmFrdWluLW5vLWtvLw==".encode()).decode() ], 'https://novelsrock.com/': [ 'https://novelsrock.com/novel/the-returner/', 'kuro' ], 'http://gravitytales.com/': [ 'http://gravitytales.com/posts/novel/a-dragons-curiosity' ], 'http://novelfull.com/': [ 'http://novelfull.com/dungeon-defense.html', 'Sinister Ex Girlfriend', ], 'http://www.machinenoveltranslation.com/': [ 'http://www.machinenoveltranslation.com/a-thought-through-eternity', ], 'http://zenithnovels.com/': [ 'http://zenithnovels.com/infinity-armament/', ], 'https://anythingnovel.com/': [ 'https://anythingnovel.com/novel/king-of-gods/', ], 'https://boxnovel.com/': [ 'https://boxnovel.com/novel/the-rest-of-my-life-is-for-you/', 'cultivation chat', ], 'https://crescentmoon.blog/': [ 'https://crescentmoon.blog/dark-blue-and-moonlight/', ], 'https://litnet.com/': [ 'https://litnet.com/en/book/candy-lips-1-b106232', 'candy lips', ], 'https://lnmtl.com/': [ 'https://lnmtl.com/novel/the-strongest-dan-god', ], 'https://m.chinesefantasynovels.com/': [ 'https://m.chinesefantasynovels.com/3838/', ], 'https://m.novelspread.com/': [ 'https://m.novelspread.com/novel/the-legend-of-the-concubine-s-daughter-minglan', ], 'https://m.romanticlovebooks.com/': [ 'https://m.romanticlovebooks.com/xuanhuan/207.html', ], 'http://www.tiknovel.com/': [ 'http://www.tiknovel.com/book/index?id=717', ], 'https://www.wuxiaworld.co/': [ 'sword', ], 'https://m.wuxiaworld.co/': [ 'https://m.wuxiaworld.co/Reincarnation-Of-The-Strongest-Sword-God/', ], 'https://meionovel.id/': [ 'https://meionovel.id/novel/the-legendary-mechanic/', ], 'https://mtled-novels.com/': [ 'https://mtled-novels.com/novels/great-ruler/', 'great ruler' ], 'https://bestlightnovel.com/': [ 'https://bestlightnovel.com/novel_888103800', 'martial' ], 'https://novelplanet.com/': [ 'https://novelplanet.com/Novel/Returning-from-the-Immortal-World', 'immortal' ], 'https://www.volarenovels.com/': [ 'https://www.volarenovels.com/novel/adorable-creature-attacks', ], 'https://webnovel.online/': [ 'https://webnovel.online/full-marks-hidden-marriage-pick-up-a-son-get-a-free-husband', ], 'https://www.idqidian.us/': [ 'https://www.idqidian.us/novel/peerless-martial-god/' ], 'https://www.novelall.com/': [ 'https://www.novelall.com/novel/Virtual-World-Close-Combat-Mage.html', 'combat' ], 'https://www.novelspread.com/': [ 'https://www.novelspread.com/novel/the-legend-of-the-concubine-s-daughter-minglan' ], 'https://www.readlightnovel.org/': [ 'https://www.readlightnovel.org/top-furious-doctor-soldier' ], 'https://www.romanticlovebooks.com/': [ 'https://www.romanticlovebooks.com/xianxia/251.html' ], 'https://www.royalroad.com/': [ 'https://www.royalroad.com/fiction/21220/mother-of-learning', 'mother' ], 'https://www.scribblehub.com/': [ 'https://www.scribblehub.com/series/73550/modern-life-of-the-exalted-immortal/', 'cultivation' ], 'https://www.webnovel.com/': [ 'https://www.webnovel.com/book/8212987205006305/Trial-Marriage-Husband%3A-Need-to-Work-Hard', 'martial', ], 'https://www.worldnovel.online/': [ 'https://www.worldnovel.online/novel/solo-leveling/', ], 'https://www.wuxiaworld.co/': [ 
'https://www.wuxiaworld.co/Reincarnation-Of-The-Strongest-Sword-God/', 'sword' ], 'https://rewayat.club/': [ 'https://rewayat.club/novel/almighty-sword-domain/' ], 'https://www.wuxiaworld.com/': [ 'https://www.wuxiaworld.com/novel/martial-god-asura', 'martial', ], 'https://creativenovels.com/': [ 'https://creativenovels.com/novel/eternal-reverence/', ], 'https://www.tapread.com/': [ 'https://www.tapread.com/book/detail/80', ], 'http://www.tapread.com/': [ 'http://www.tapread.com/book/detail/80', ], 'https://readnovelfull.com/': [ 'https://readnovelfull.com/lord-of-all-realms.html', 'cultivation' ], 'https://myoniyonitranslations.com/': [ 'https://myoniyonitranslations.com/top-management/', 'https://myoniyonitranslations.com/category/god-of-tennis', ], 'https://babelnovel.com/': [ 'https://babelnovel.com/books/ceo-let-me-go', 'dazzle Good' ], 'https://wuxiaworld.online/': [ 'https://wuxiaworld.online/trial-marriage-husband-need-to-work-hard', 'cultivation', ], 'https://www.novelv.com/': [ 'https://www.novelv.com/0/349/' ], 'http://fullnovel.live/': [ 'http://fullnovel.live/novel-a-will-eternal', 'will eternal', ], 'https://www.noveluniverse.com/': [ 'https://www.noveluniverse.com/index/novel/info/id/15.html' ], 'https://novelraw.blogspot.com/': [ 'https://novelraw.blogspot.com/2019/03/dragon-king-son-in-law-mtl.html' ], 'https://light-novel.online/': [ 'https://light-novel.online/great-tyrannical-deity', 'tyrannical' ], 'https://www.rebirth.online/': [ 'https://www.rebirth.online/novel/upside-down' ], 'https://www.jieruihao.cn/': [ 'https://www.jieruihao.cn/novel/against-the-gods/', ], 'https://www.wattpad.com/': [ 'https://www.wattpad.com/story/87505567-loving-mr-jerkface-%E2%9C%94%EF%B8%8F' ], 'https://novelgo.id/': [ 'https://novelgo.id/novel/the-mightiest-leveling-system/' ], 'https://yukinovel.me/': [ 'https://yukinovel.me/novel/the-second-coming-of-avarice/', ], 'https://www.asianhobbyist.com/': [ 'https://www.asianhobbyist.com/series/that-time-i-got-reincarnated-as-a-slime/' ], 'https://kisslightnovels.info/': [ 'https://kisslightnovels.info/novel/solo-leveling/' ], 'https://novelonlinefull.com/': [ 'https://novelonlinefull.com/novel/abo1520855001564322110' ], 'https://www.machine-translation.org/': [ 'https://www.machine-translation.org/novel/bace21c9b10d34e9/world-of-cultivation.html' ], 'https://www.fanfiction.net/': [ 'https://www.fanfiction.net/s/7268451/1/Facebook-For-wizards' ], 'https://www.mtlnovel.com/': [ 'https://www.mtlnovel.com/trapped-in-a-typical-idol-drama/' ], 'https://wordexcerpt.com/': [ 'https://wordexcerpt.com/series/transmigration-raising-the-child-of-the-male-lead-boss/' ], 'https://www.translateindo.com/': [ 'https://www.translateindo.com/demon-wang-golden-status-favoured-fei/' ], 'https://ranobelib.me/': [ 'https://ranobelib.me/sozvezdie-klinka' ], 'https://novelringan.com/': [ 'https://novelringan.com/series/the-most-loving-marriage-in-history-master-mus-pampered-wife/' ], 'https://wuxiaworld.site/': [ 'https://wuxiaworld.site/novel/only-i-level-up/' ], 'https://id.mtlnovel.com/': [ 'https://id.mtlnovel.com/the-strongest-plane-becomes-god/' ], 'https://www.shinsori.com/': [ 'https://www.shinsori.com/akuyaku-reijou-ni-nanka-narimasen/' ], 'https://www.flying-lines.com/': [ 'https://www.flying-lines.com/novel/one-useless-rebirth' ], 'https://book.qidian.com/': [ 'https://book.qidian.com/info/1016597088' ], 'https://kiss-novel.com/': [ 'https://kiss-novel.com/the-first-order' ], 'https://www.machine-translation.org/': [ 
'https://www.machine-translation.org/novel/a5eee127d75da0d2/long-live-summons.html' ], 'https://www.aixdzs.com/': [ 'https://www.aixdzs.com/d/66/66746/' ], 'https://webnovelonline.com/': [ 'https://webnovelonline.com/novel/the_anarchic_consort' ], 'https://4scanlation.com/': [ 'https://4scanlation.com/tensei-shitara-slime-datta-ken-wn/' ], 'https://listnovel.com/': [ 'https://listnovel.com/novel/my-sassy-crown-princess/' ], 'https://tomotranslations.com/': [ 'https://tomotranslations.com/this-hero-is-invincible-but-too-cautious/' ], 'https://www.wuxialeague.com/': [ 'https://www.wuxialeague.com/novel/245/' ], 'http://liberspark.com/': [ 'http://liberspark.com/novel/black-irons-glory' ], 'https://webnovelindonesia.com/': [ 'https://webnovelindonesia.com/nv/almighty-student' ], 'https://webnovelindonesia.com/': [ 'https://webnovelindonesia.com/nv/almighty-student' ], 'http://tiknovel.com/': [ 'http://tiknovel.com/book/index?id=717' ], 'http://boxnovel.org/': [ 'http://boxnovel.org/novel/martial-god-asura' ] }
1.953125
2
wikisourcesort.py
ostropunk/wikisourcesort
0
108
<reponame>ostropunk/wikisourcesort #!/usr/bin/env python # coding: utf-8 # In[1]: import pandas as pd import re # In[2]: def get_excel_dict(excelfile, key=None, index_col=0, header=0): dataframe = pd.read_excel(excelfile, index_col=index_col, header=header) dictionary = dataframe.to_dict() if key is None: return dictionary else: return dictionary[key] # In[3]: def textreader(text): '''Opens textfile and returns the content as a string''' with open(text, 'rt', encoding="utf8") as wiki: txtstring = wiki.read() return txtstring # In[44]: def replace_from_dict(text, dictionary): '''Replaces words in text with new words in dictionary''' for word in dictionary: text = text.replace(word, dictionary[word]) return text # In[172]: def get_ref(text): ''' Finds references between the <ref>- and </ref>-tags and returns them as a list of strings ''' ref = re.findall("\<ref.+?\<\/ref\>", text) return ref # In[171]: def getrefurl(ref): '''Finds the reference url in references and returns it as a string''' url = re.search("http.+?(?=\s|\|title=|\|titel|\}\})", ref) url = url.group() return url # In[30]: def get_domain_name(url): ''' Finds the domain name of the reference url and returns that name as a string. ''' domain_name = re.search('(?<=\/\/).+?(?=\/)', url) domain_name = domain_name.group() if domain_name.startswith('www.'): domain_name = domain_name.replace('www.', '') return domain_name # In[32]: def update_ref_dict(ref, ref_dict, ref_counts): refurl = getrefurl(ref) domain_name = get_domain_name(refurl) if refurl not in ref_dict: if domain_name not in ref_counts: ref_counts.update({domain_name:1}) refname = domain_name + '.' + str(ref_counts[domain_name]) else: ref_counts[domain_name] = ref_counts[domain_name] + 1 refname = domain_name + '.' + str(ref_counts[domain_name]) ref_dict.update({refurl:{'refs': [ref], 'refname': refname, 'refurl': refurl}}) else: if ref not in ref_dict[refurl]['refs']: ref_dict[refurl]['refs'].append(ref) return ref_dict, ref_counts # In[36]: def create_ref_dict(refs): ''' Takes a list of references, extracts the reference url and name, and returns a dictionary sorted on the referenceurl as key. 
''' ref_dict = {} ref_counts = {} for ref in refs: ref_dict, ref_counts = update_ref_dict(ref, ref_dict, ref_counts) return ref_dict # In[79]: def get_ref_tag(text): ''' Finds references between the <ref>- and </ref>-tags and returns them as a list of strings ''' ref = re.findall("\<ref name\=.+?\/\>", text) #ref = re.findall("\<ref.+?\<\/ref\>|\<ref name\=.+?\/\>", text) #ref = re.findall("\<ref.+?(?!\"\s\/\>)\<\/ref>", text) #ref = re.findall("\<ref.+?\<\/ref\>", text) return set(ref) # In[130]: def get_spec_ref(text, ref_tag): ''' Finds references between the <ref>- and </ref>-tags and returns them as a list of strings ''' #ref = re.findall("\<ref name\=.+?\/\>", text) #ref = re.findall("\<ref.+?\<\/ref\>|\<ref name\=.+?\/\>", text) #ref = re.findall("\<ref.+?(?!\"\s\/\>)\<\/ref>", text) ref = re.findall(f'\<ref name\=\"{ref_tag}\"\>.+?\<\/ref\>', text) ref = ref[0] return ref # In[115]: def get_ref_tag_name(ref_tag): ref_tag_name = re.findall('\".+\"', ref_tag) ref_tag_name = ref_tag_name[0].replace('"', '') return ref_tag_name # In[136]: def replace_tags(text): ref_tags = get_ref_tag(text) for tag in ref_tags: name = get_ref_tag_name(tag) spec_ref = get_spec_ref(text, name) text = text.replace(tag, spec_ref) return text # In[49]: def replace_countries(text): countries = get_excel_dict('countries2.xlsx', 'Länder') text = replace_from_dict(text, countries) return text # In[66]: def replace_headers(text): headers = {'English title':'Engelsk titel', 'Original title':'Originaltitel', 'Director(s)':'Regissör(er)', 'Country':'Land', 'School':'Skola'} text = replace_from_dict(text, headers) return text # In[169]: def reference_sorter(text): ''' Does a bunch of stuff that should be broken out in different functions. ''' references = get_ref(text) reference_dict = create_ref_dict(references) reference_list = [] reference_text = '== Referenser ==\n<references>\n' text = text.replace('== Källor ==', '== Referenser ==') text = text.replace('<references/>', '') for entry in reference_dict: for reference in reference_dict[entry]['refs']: text = text.replace(reference, '<ref name="{}" />'.format(reference_dict[entry]['refname'])) reference_list.append('<ref name="{}">{}</ref>'.format(reference_dict[entry]['refname'], entry)) for reference in reference_list: reference_text += reference +'\n' reference_text += '</references>' text = re.split('== Referenser ==', text) text = text[0] + reference_text + text[-1] return text # In[134]: def fix_wiki_entry(textfile): with open(textfile, 'r', encoding="utf8") as txt: text = txt.read() text = replace_tags(text) text = reference_sorter(text) text = replace_countries(text) text = replace_headers(text) with open('new_' + textfile, 'w', encoding='utf8') as new_text: new_text.write(text) return text # In[173]: def main(): fix_wiki_entry(input('Please enter input textfile:')) if __name__ == "__main__": main()
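# Editor's note: a small, hedged walk-through of the extraction helpers above; it is
# not part of the original script and is never called. The wikitext snippet is
# invented purely to show how a raw <ref> tag flows through get_ref -> getrefurl ->
# get_domain_name -> create_ref_dict.
def _example_reference_extraction():
    sample = ('Some film article.<ref>{{cite web|url=https://www.imdb.com/title/tt0000001/ '
              '|title=Example}}</ref> More text.<ref>{{cite web|url=https://www.imdb.com/title/tt0000001/ '
              '|title=Example again}}</ref>')
    refs = get_ref(sample)            # two raw <ref>...</ref> strings
    url = getrefurl(refs[0])          # 'https://www.imdb.com/title/tt0000001/'
    domain = get_domain_name(url)     # 'imdb.com' (leading 'www.' stripped)
    ref_dict = create_ref_dict(refs)
    # Both tags share one URL, so they collapse into a single entry whose refname
    # is 'imdb.com.1' and whose 'refs' list holds both original tags.
    return url, domain, ref_dict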
3.28125
3
backend/radar/engine/body_objects.py
me-anton/radar-app
0
109
<gh_stars>0 import logging import json from dataclasses import dataclass from redis import Redis from typing import Iterable, Tuple, List, Iterator, Union, Dict from typing_extensions import TypedDict from backend import settings from caching.scripts import RedisScriptsPool from share.metaclasses import Singleton from radar.models import AlienBody from radar.validation import validate_body_str_profile logger = logging.getLogger(__name__) BodiesUpdate = TypedDict('BodiesUpdate', {'dropped_keys': List[str], 'new_records': Dict[str, str]}) @dataclass(frozen=True) class BodyObject: key: str matrix: List[List[str]] width: int height: int @staticmethod def generate(key: str, body: str) -> 'BodyObject': line_list = body.splitlines() matrix = [list(line) for line in line_list] return BodyObject(key=key, matrix=matrix, width=len(matrix[0]), height=len(matrix)) class BodyObjectsPool(metaclass=Singleton): """ An object for getting BodyObject instances from database or cache """ body_key_prefix = 'body:' body_lookup_pattern = body_key_prefix + '*' body_expiration = 10 # in seconds def __init__(self, num_of_default_bodies=3): self.num_of_default_bodies = num_of_default_bodies self.__default_bodies: Tuple[BodyObject, ...] = \ self._generate_defaults(num_of_default_bodies) self._redis = Redis(host=settings.REDIS_HOSTNAME) self._scripts = RedisScriptsPool() def add_body(self, body: Union[str, bytes], body_id: str) -> None: """Cache the requested body string in Redis db""" validate_body_str_profile(body) key = self.make_body_key(body_id) self._redis.set(key, body, self.body_expiration) def ping_body(self, body_id: str): """Reset expiration time of a body""" key = self.make_body_key(body_id) self._redis.expire(key, self.body_expiration) def update_bodies(self, known_bodies_keys: Iterable[str], max_capacity: int) -> BodiesUpdate: """ Give update on state of body objects' records in Redis db :param known_bodies_keys: redis keys of already known bodies :param max_capacity: maximum relevant for requester number of bodies including already known ones """ return json.loads( self._scripts.update_records(keys=known_bodies_keys, args=[max_capacity, self.body_lookup_pattern]) ) def make_body_key(self, body_id: str): return self.body_key_prefix + body_id @property def first(self): return self._get_default(0) @property def second(self): return self._get_default(1) @property def third(self): return self._get_default(2) def _get_default(self, index) -> BodyObject: return self.__default_bodies[index] @staticmethod def _generate_defaults(num_of_defaults): logger.info('Generating default bodies') query = AlienBody.objects.filter(id__lte=num_of_defaults) return tuple(BodyObject.generate(str(body.id), body.body_str) for body in query)
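# Editor's note: a hedged usage sketch, not part of the original module and never
# called. It assumes a configured Django app (BodyObjectsPool's constructor queries
# AlienBody for its defaults) and a reachable Redis at settings.REDIS_HOSTNAME. The
# body string below is invented; the real constraints live in
# validate_body_str_profile, so it may need adjusting.
def _example_pool_usage():
    # Pure helper first: BodyObject.generate only splits the string into a matrix.
    body_str = 'x*x\n*x*\nx*x'
    obj = BodyObject.generate('demo', body_str)    # width == 3, height == 3
    pool = BodyObjectsPool()                       # singleton; loads default bodies once
    pool.add_body(body_str, body_id='demo')        # cached under 'body:demo' for 10 s
    pool.ping_body('demo')                         # reset the 10-second expiry
    # Ask Redis what changed relative to the keys we already track:
    update = pool.update_bodies(known_bodies_keys=[], max_capacity=5)
    return obj, update['new_records'], update['dropped_keys']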
2.21875
2
djangocms_baseplugins/spacer/cms_plugins.py
benzkji/djangocms-baseplugins
2
110
<filename>djangocms_baseplugins/spacer/cms_plugins.py # coding: utf-8 from cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from django import forms from django.utils.translation import ugettext_lazy as _ from djangocms_baseplugins.baseplugin import defaults from djangocms_baseplugins.baseplugin.cms_plugins import BasePluginMixin from djangocms_baseplugins.baseplugin.utils import get_fields_from_fieldsets, get_baseplugin_widgets from . import conf from .models import Spacer class SpacerPluginForm(forms.ModelForm): class Meta: model = Spacer fields = get_fields_from_fieldsets(conf.FIELDSETS) # exclude = [] widgets = get_baseplugin_widgets(conf) class SpacerPlugin(BasePluginMixin, CMSPluginBase): model = Spacer form = SpacerPluginForm module = defaults.SPECIAL_LABEL name = _(u'Spacer') render_template = "djangocms_baseplugins/spacer.html" fieldsets = conf.FIELDSETS plugin_pool.register_plugin(SpacerPlugin)
1.8125
2
python_utilities/plotting/util.py
sdaxen/python_utilities
2
111
<reponame>sdaxen/python_utilities """Utility functions for plotting. Author: <NAME> E-mail: <EMAIL>""" from collections import deque import numpy as np def rgb_to_hsv(rgb): """Convert RGB colors to HSV colors.""" r, g, b = tuple(map(float, rgb)) if any([r > 1, g > 1, b > 1]): r /= 255. g /= 255. b /= 255. mmax = max(r, g, b) mmin = min(r, g, b) c = mmax - mmin if (c == 0.): hp = 0. elif (mmax == r): hp = ((g - b) / c) % 6 elif (mmax == g): hp = ((b - r) / c) + 2 elif (mmax == b): hp = ((r - g) / c) + 4 h = 60 * hp v = mmax if (c == 0): s = 0 else: s = c / v return (h, s, v) def hsv_to_rgb(hsv): """Convert HSV colors to RGB colors.""" h, s, v = tuple(map(float, hsv)) c = v * s m = v - c hp = h / 60. x = c * (1. - abs((hp % 2) - 1.)) hp = int(hp) rgb = deque((c + m, x + m, m)) if (hp % 2): rgb.reverse() rgb.rotate((hp - 3) / 2) else: rgb.rotate(hp / 2) return tuple(rgb) def rgb_to_yuv(rgb): """Convert RGB colors to Y'UV colors, useful for comparison.""" rgbv = np.array(rgb).reshape(3, 1) if np.any(rgbv > 1.): rgbv = rgbv / 255. yuv = np.dot(np.array([[ .299, .587, .114], [-.14713, -.28886, .436], [ .615, -.51499, -.10001]], dtype=np.double), rgbv) return list(yuv) def yuv_to_rgb(yuv): """Convert Y'UV colors to RGB colors.""" yuvv = np.array(yuv).reshape(3, 1) rgb = np.dot(np.array([[1., 0., 1.13983], [1., -.39465, -.58060], [1., 2.03211, 0.]], dtype=np.double), yuvv) return list(rgb) def compute_yuv_dist(rgb1, rgb2): """Compute Euclidean Y'UV distance between RGB colors.""" yuv1 = rgb_to_yuv(rgb1) yuv2 = rgb_to_yuv(rgb2) return float(sum((np.array(yuv1) - np.array(yuv2))**2)**.5) def lighten_rgb(rgb, p=0.): """Lighten RGB colors by percentage p of total.""" h, s, v = rgb_to_hsv(rgb) hsv = (h, s, min(1, v + p)) return hsv_to_rgb(hsv)
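# Editor's note: a short, hedged worked example of the helpers above; it is not part
# of the original module and only runs when the file is executed directly. Values in
# the comments are rounded. hsv_to_rgb()/lighten_rgb() are skipped here because
# deque.rotate(hp / 2) relies on Python 2 integer division.
if __name__ == '__main__':
    red = (255, 0, 0)                                # 0-255 input is normalised to 0-1
    print(rgb_to_hsv(red))                           # (0.0, 1.0, 1.0)
    print(rgb_to_yuv(red))                           # column vector, roughly 0.299, -0.147, 0.615
    # Euclidean Y'UV distance between red and a slightly darker red: about 0.14
    print(compute_yuv_dist((1.0, 0.0, 0.0), (0.8, 0.0, 0.0)))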
3.203125
3
tests/test_process.py
confluentinc/utils-core
0
112
<filename>tests/test_process.py import pytest from utils.process import run, silent_run, RunError from utils.fs import in_temp_dir def test_run(capsys): with in_temp_dir(): assert run('echo hello > hello.txt; echo world >> hello.txt', shell=True) out = run('ls', return_output=True) assert out == 'hello.txt\n' out = run(['cat', 'hello.txt'], return_output=True) assert out == 'hello\nworld\n' with pytest.raises(RunError): run('blah') assert not run('blah', raises=False) assert silent_run('ls -l') out, _ = capsys.readouterr() assert out == ''
2.34375
2
bokeh/client/util.py
areaweb/bokeh
1
113
<filename>bokeh/client/util.py<gh_stars>1-10 #----------------------------------------------------------------------------- # Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- ''' Internal utility functions used by ``bokeh.client`` ''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals import logging log = logging.getLogger(__name__) from bokeh.util.api import public, internal ; public, internal #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports # External imports # Bokeh imports #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Public API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Internal API #----------------------------------------------------------------------------- @internal((1,0,0)) def server_url_for_websocket_url(url): ''' Convert an ``ws(s)`` URL for a Bokeh server into the appropriate ``http(s)`` URL for the websocket endpoint. Args: url (str): An ``ws(s)`` URL ending in ``/ws`` Returns: str: The corresponding ``http(s)`` URL. Raises: ValueError: If the input URL is not of the proper form. ''' if url.startswith("ws:"): reprotocoled = "http" + url[2:] elif url.startswith("wss:"): reprotocoled = "https" + url[3:] else: raise ValueError("URL has non-websocket protocol " + url) if not reprotocoled.endswith("/ws"): raise ValueError("websocket URL does not end in /ws") return reprotocoled[:-2] @internal((1,0,0)) def websocket_url_for_server_url(url): ''' Convert an ``http(s)`` URL for a Bokeh server websocket endpoint into the appropriate ``ws(s)`` URL Args: url (str): An ``http(s)`` URL Returns: str: The corresponding ``ws(s)`` URL ending in ``/ws`` Raises: ValueError: If the input URL is not of the proper form. ''' if url.startswith("http:"): reprotocoled = "ws" + url[4:] elif url.startswith("https:"): reprotocoled = "wss" + url[5:] else: raise ValueError("URL has unknown protocol " + url) if reprotocoled.endswith("/"): return reprotocoled + "ws" else: return reprotocoled + "/ws" #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
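# Editor's note: a brief, hedged usage sketch, not part of the original module; it
# only runs when the file is executed directly.
if __name__ == '__main__':
    print(websocket_url_for_server_url('http://localhost:5006'))        # ws://localhost:5006/ws
    print(websocket_url_for_server_url('https://example.org/bokeh/'))   # wss://example.org/bokeh/ws
    print(server_url_for_websocket_url('ws://localhost:5006/ws'))       # http://localhost:5006/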
1.960938
2
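The two converters in the bokeh/client/util.py record above are inverses of each other on well-formed inputs. A short usage sketch, assuming the module is importable under its file path; the example URLs are illustrative only.

# Round-trip sketch for the helpers defined in bokeh/client/util.py.
from bokeh.client.util import (
    server_url_for_websocket_url,
    websocket_url_for_server_url,
)

ws_url = "ws://localhost:5006/ws"
http_url = server_url_for_websocket_url(ws_url)
assert http_url == "http://localhost:5006/"

# Converting back appends the /ws suffix again.
assert websocket_url_for_server_url(http_url) == ws_url

# Malformed inputs raise ValueError.
try:
    server_url_for_websocket_url("http://localhost:5006/")
except ValueError as e:
    print("rejected:", e)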
slim/nets/inception_resnet_v2.py
PPTMiao/mtl-ssl
90
114
<filename>slim/nets/inception_resnet_v2.py # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the definition of the Inception Resnet V2 architecture. As described in http://arxiv.org/abs/1602.07261. Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning <NAME>, <NAME>, <NAME>, <NAME> """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf slim = tf.contrib.slim def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): """Builds the 35x35 resnet block.""" with tf.variable_scope(scope, 'Block35', [net], reuse=reuse): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1') with tf.variable_scope('Branch_1'): tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1') tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3') tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3') mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2]) up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1') net += scale * up if activation_fn: net = activation_fn(net) return net def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): """Builds the 17x17 resnet block.""" with tf.variable_scope(scope, 'Block17', [net], reuse=reuse): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1') with tf.variable_scope('Branch_1'): tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7], scope='Conv2d_0b_1x7') tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1], scope='Conv2d_0c_7x1') mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2]) up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1') net += scale * up if activation_fn: net = activation_fn(net) return net def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): """Builds the 8x8 resnet block.""" with tf.variable_scope(scope, 'Block8', [net], reuse=reuse): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1') with tf.variable_scope('Branch_1'): tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3], scope='Conv2d_0b_1x3') tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1], scope='Conv2d_0c_3x1') mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2]) up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1') net += scale * up if 
activation_fn: net = activation_fn(net) return net def inception_resnet_v2_base(inputs, final_endpoint='Conv2d_7b_1x1', output_stride=16, align_feature_maps=False, scope=None): """Inception model from http://arxiv.org/abs/1602.07261. Constructs an Inception Resnet v2 network from inputs to the given final endpoint. This method can construct the network up to the final inception block Conv2d_7b_1x1. Args: inputs: a tensor of size [batch_size, height, width, channels]. final_endpoint: specifies the endpoint to construct the network up to. It can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_6a', 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1'] output_stride: A scalar that specifies the requested ratio of input to output spatial resolution. Only supports 8 and 16. align_feature_maps: When true, changes all the VALID paddings in the network to SAME padding so that the feature maps are aligned. scope: Optional variable_scope. Returns: tensor_out: output tensor corresponding to the final_endpoint. end_points: a set of activations for external use, for example summaries or losses. Raises: ValueError: if final_endpoint is not set to one of the predefined values, or if the output_stride is not 8 or 16, or if the output_stride is 8 and we request an end point after 'PreAuxLogits'. """ if output_stride != 8 and output_stride != 16: raise ValueError('output_stride must be 8 or 16.') padding = 'SAME' if align_feature_maps else 'VALID' end_points = {} def add_and_check_final(name, net): end_points[name] = net return name == final_endpoint with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]): with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'): # 149 x 149 x 32 net = slim.conv2d(inputs, 32, 3, stride=2, padding=padding, scope='Conv2d_1a_3x3') if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points # 147 x 147 x 32 net = slim.conv2d(net, 32, 3, padding=padding, scope='Conv2d_2a_3x3') if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points # 147 x 147 x 64 net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3') if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points # 73 x 73 x 64 net = slim.max_pool2d(net, 3, stride=2, padding=padding, scope='MaxPool_3a_3x3') if add_and_check_final('MaxPool_3a_3x3', net): return net, end_points # 73 x 73 x 80 net = slim.conv2d(net, 80, 1, padding=padding, scope='Conv2d_3b_1x1') if add_and_check_final('Conv2d_3b_1x1', net): return net, end_points # 71 x 71 x 192 net = slim.conv2d(net, 192, 3, padding=padding, scope='Conv2d_4a_3x3') if add_and_check_final('Conv2d_4a_3x3', net): return net, end_points # 35 x 35 x 192 net = slim.max_pool2d(net, 3, stride=2, padding=padding, scope='MaxPool_5a_3x3') if add_and_check_final('MaxPool_5a_3x3', net): return net, end_points # 35 x 35 x 320 with tf.variable_scope('Mixed_5b'): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1') with tf.variable_scope('Branch_1'): tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5, scope='Conv2d_0b_5x5') with tf.variable_scope('Branch_2'): tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1') tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3, scope='Conv2d_0b_3x3') tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3, scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): tower_pool = slim.avg_pool2d(net, 3, 
stride=1, padding='SAME', scope='AvgPool_0a_3x3') tower_pool_1 = slim.conv2d(tower_pool, 64, 1, scope='Conv2d_0b_1x1') net = tf.concat( [tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1], 3) if add_and_check_final('Mixed_5b', net): return net, end_points # TODO(alemi): Register intermediate endpoints net = slim.repeat(net, 10, block35, scale=0.17) # 17 x 17 x 1088 if output_stride == 8, # 33 x 33 x 1088 if output_stride == 16 use_atrous = output_stride == 8 with tf.variable_scope('Mixed_6a'): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(net, 384, 3, stride=1 if use_atrous else 2, padding=padding, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3, scope='Conv2d_0b_3x3') tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3, stride=1 if use_atrous else 2, padding=padding, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): tower_pool = slim.max_pool2d(net, 3, stride=1 if use_atrous else 2, padding=padding, scope='MaxPool_1a_3x3') net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3) if add_and_check_final('Mixed_6a', net): return net, end_points # TODO(alemi): register intermediate endpoints with slim.arg_scope([slim.conv2d], rate=2 if use_atrous else 1): net = slim.repeat(net, 20, block17, scale=0.10) if add_and_check_final('PreAuxLogits', net): return net, end_points if output_stride == 8: # TODO(gpapan): Properly support output_stride for the rest of the net. raise ValueError('output_stride==8 is only supported up to the ' 'PreAuxlogits end_point for now.') # 8 x 8 x 2080 with tf.variable_scope('Mixed_7a'): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2, padding=padding, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2, padding=padding, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3, scope='Conv2d_0b_3x3') tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2, padding=padding, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_3'): tower_pool = slim.max_pool2d(net, 3, stride=2, padding=padding, scope='MaxPool_1a_3x3') net = tf.concat( [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3) if add_and_check_final('Mixed_7a', net): return net, end_points # TODO(alemi): register intermediate endpoints net = slim.repeat(net, 9, block8, scale=0.20) net = block8(net, activation_fn=None) # 8 x 8 x 1536 net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1') if add_and_check_final('Conv2d_7b_1x1', net): return net, end_points raise ValueError('final_endpoint (%s) not recognized', final_endpoint) def inception_resnet_v2(inputs, num_classes=1001, is_training=True, dropout_keep_prob=0.8, reuse=None, scope='InceptionResnetV2', create_aux_logits=True): """Creates the Inception Resnet V2 model. Args: inputs: a 4-D tensor of size [batch_size, height, width, 3]. num_classes: number of predicted classes. is_training: whether is training or not. dropout_keep_prob: float, the fraction to keep before final layer. reuse: whether or not the network and its variables should be reused. To be able to reuse 'scope' must be given. scope: Optional variable_scope. 
create_aux_logits: Whether to include the auxilliary logits. Returns: logits: the logits outputs of the model. end_points: the set of end_points from the inception model. """ end_points = {} with tf.variable_scope(scope, 'InceptionResnetV2', [inputs, num_classes], reuse=reuse) as scope: with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training): net, end_points = inception_resnet_v2_base(inputs, scope=scope) if create_aux_logits: with tf.variable_scope('AuxLogits'): aux = end_points['PreAuxLogits'] aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID', scope='Conv2d_1a_3x3') aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1') aux = slim.conv2d(aux, 768, aux.get_shape()[1:3], padding='VALID', scope='Conv2d_2a_5x5') aux = slim.flatten(aux) aux = slim.fully_connected(aux, num_classes, activation_fn=None, scope='Logits') end_points['AuxLogits'] = aux with tf.variable_scope('Logits'): net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID', scope='AvgPool_1a_8x8') net = slim.flatten(net) net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='Dropout') end_points['PreLogitsFlatten'] = net logits = slim.fully_connected(net, num_classes, activation_fn=None, scope='Logits') end_points['Logits'] = logits end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions') return logits, end_points inception_resnet_v2.default_image_size = 299 def inception_resnet_v2_arg_scope(weight_decay=0.00004, batch_norm_decay=0.9997, batch_norm_epsilon=0.001, trainable=True): """Returns the scope with the default parameters for inception_resnet_v2. Args: weight_decay: the weight decay for weights variables. batch_norm_decay: decay for the moving average of batch_norm momentums. batch_norm_epsilon: small float added to variance to avoid dividing by zero. Returns: a arg_scope with the parameters needed for inception_resnet_v2. """ # Set weight_decay for weights in conv2d and fully_connected layers. with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(weight_decay), biases_regularizer=slim.l2_regularizer(weight_decay), trainable=trainable): batch_norm_params = { 'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon, 'trainable': trainable } # Set activation_fn and parameters for batch_norm. with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params) as scope: return scope
2.328125
2
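A brief usage sketch for the Inception-ResNet-v2 definition above, following the pattern implied by its docstrings and the arg_scope helper in the same file. The import path, the placeholder shape, and the TF 1.x / tf.contrib.slim environment are assumptions, not taken from the record.

# Hedged usage sketch for slim/nets/inception_resnet_v2.py (TF 1.x, tf.contrib.slim).
import tensorflow as tf
from nets import inception_resnet_v2  # import path assumed

slim = tf.contrib.slim

# default_image_size is 299, so feed 299x299 RGB images.
images = tf.placeholder(tf.float32, [None, 299, 299, 3])

with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
    logits, end_points = inception_resnet_v2.inception_resnet_v2(
        images, num_classes=1001, is_training=False)

# end_points exposes intermediate activations, e.g. the softmax output and the
# auxiliary classifier head when create_aux_logits=True.
predictions = end_points['Predictions']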
tests/boilerplate_client/boilerplate_cmd.py
LedgerHQ/ledger-app-neo3
0
115
<filename>tests/boilerplate_client/boilerplate_cmd.py<gh_stars>0 import struct from typing import Tuple from ledgercomm import Transport from boilerplate_client.boilerplate_cmd_builder import BoilerplateCommandBuilder, InsType from boilerplate_client.button import Button from boilerplate_client.exception import DeviceException from boilerplate_client.transaction import Transaction from neo3.network import payloads class BoilerplateCommand: def __init__(self, transport: Transport, debug: bool = False) -> None: self.transport = transport self.builder = BoilerplateCommandBuilder(debug=debug) self.debug = debug def get_app_and_version(self) -> Tuple[str, str]: sw, response = self.transport.exchange_raw( self.builder.get_app_and_version() ) # type: int, bytes if sw != 0x9000: raise DeviceException(error_code=sw, ins=0x01) # response = format_id (1) || # app_name_len (1) || # app_name (var) || # version_len (1) || # version (var) || offset: int = 0 format_id: int = response[offset] offset += 1 app_name_len: int = response[offset] offset += 1 app_name: str = response[offset:offset + app_name_len].decode("ascii") offset += app_name_len version_len: int = response[offset] offset += 1 version: str = response[offset:offset + version_len].decode("ascii") offset += version_len return app_name, version def get_version(self) -> Tuple[int, int, int]: sw, response = self.transport.exchange_raw( self.builder.get_version() ) # type: int, bytes if sw != 0x9000: raise DeviceException(error_code=sw, ins=InsType.INS_GET_VERSION) # response = MAJOR (1) || MINOR (1) || PATCH (1) assert len(response) == 3 major, minor, patch = struct.unpack( "BBB", response ) # type: int, int, int return major, minor, patch def get_app_name(self) -> str: sw, response = self.transport.exchange_raw( self.builder.get_app_name() ) # type: int, bytes if sw != 0x9000: raise DeviceException(error_code=sw, ins=InsType.INS_GET_APP_NAME) return response.decode("ascii") def get_public_key(self, bip44_path: str, display: bool = False) -> bytes: sw, response = self.transport.exchange_raw( self.builder.get_public_key(bip44_path=bip44_path) ) # type: int, bytes if sw != 0x9000: raise DeviceException(error_code=sw, ins=InsType.INS_GET_PUBLIC_KEY) assert len(response) == 65 # 04 + 64 bytes of uncompressed key return response def sign_tx(self, bip44_path: str, transaction: payloads.Transaction, network_magic: int, button: Button) -> Tuple[int, bytes]: sw: int response: bytes = b"" for is_last, chunk in self.builder.sign_tx(bip44_path=bip44_path, transaction=transaction, network_magic=network_magic): self.transport.send_raw(chunk) if is_last: # Review Transaction button.right_click() # Destination address button.right_click() button.right_click() button.right_click() # Token Amount button.right_click() # Target network button.right_click() # System fee button.right_click() # Network fee button.right_click() # Total fees button.right_click() # Valid until button.right_click() # Signer 1 of 1 button.right_click() # Account 1/3, 2/3, 3/3 button.right_click() button.right_click() button.right_click() # Scope button.right_click() # custom contracts if (len(transaction.signers) > 0 and payloads.WitnessScope.CUSTOM_CONTRACTS in transaction.signers[0].scope): for _ in range(len(transaction.signers[0].allowed_contracts)): button.right_click() button.right_click() button.right_click() # Approve button.both_click() sw, response = self.transport.recv() # type: int, bytes if sw != 0x9000: raise DeviceException(error_code=sw, ins=InsType.INS_SIGN_TX) return 
response def sign_vote_tx(self, bip44_path: str, transaction: Transaction, network_magic: int, button: Button) -> Tuple[int, bytes]: sw: int response: bytes = b"" for is_last, chunk in self.builder.sign_tx(bip44_path=bip44_path, transaction=transaction, network_magic=network_magic): self.transport.send_raw(chunk) if is_last: # Review Transaction button.right_click() # Vote to public key button.right_click() button.right_click() button.right_click() button.right_click() # Target network button.right_click() # System fee button.right_click() # Network fee button.right_click() # Total fees button.right_click() # Valid until button.right_click() # Signer 1 of 1 button.right_click() # Account 1/3, 2/3, 3/3 button.right_click() button.right_click() button.right_click() # Scope button.right_click() # Approve button.both_click() sw, response = self.transport.recv() # type: int, bytes if sw != 0x9000: raise DeviceException(error_code=sw, ins=InsType.INS_SIGN_TX) return response
2.109375
2
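The get_app_and_version method above parses a length-prefixed response: format_id (1 byte), app_name_len (1), app_name, version_len (1), version. A worked example of that layout; the byte string is made up for illustration.

# Worked example of the response layout parsed in BoilerplateCommand.get_app_and_version.
response = bytes([0x01, 0x04]) + b"NEO3" + bytes([0x05]) + b"1.2.3"  # made-up payload

offset = 0
format_id = response[offset]; offset += 1
app_name_len = response[offset]; offset += 1
app_name = response[offset:offset + app_name_len].decode("ascii"); offset += app_name_len
version_len = response[offset]; offset += 1
version = response[offset:offset + version_len].decode("ascii")

assert (format_id, app_name, version) == (1, "NEO3", "1.2.3")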
clpy/sparse/util.py
fixstars/clpy
142
116
<filename>clpy/sparse/util.py
import clpy
import clpy.sparse.base


_preamble_atomic_add = '''
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val) {
    unsigned long long* address_as_ull = (unsigned long long*)address;
    unsigned long long old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}
#endif
'''


def isintlike(x):
    try:
        return bool(int(x) == x)
    except (TypeError, ValueError):
        return False


def isscalarlike(x):
    return clpy.isscalar(x) or (clpy.sparse.base.isdense(x) and x.ndim == 0)


def isshape(x):
    if not isinstance(x, tuple) or len(x) != 2:
        return False
    m, n = x
    return isintlike(m) and isintlike(n)
2.453125
2
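A quick illustration of the shape/scalar predicates in the clpy/sparse/util.py record above. Only plain Python values are used, so the checks below follow directly from the function bodies; importing the module still requires clpy to be installed, and isscalarlike is skipped because it needs clpy arrays.

# Sketch of how isintlike/isshape behave, based on the definitions above.
from clpy.sparse.util import isintlike, isshape

assert isintlike(3) and isintlike(3.0)        # int(3.0) == 3.0
assert not isintlike(3.5) and not isintlike("3x")

assert isshape((4, 5))                        # 2-tuple of int-like values
assert not isshape([4, 5])                    # lists are rejected
assert not isshape((4, 5, 6))                 # wrong length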
test/test_cartesian.py
hwazni/discopy
205
117
from pytest import raises

from discopy.cartesian import *


def test_Box_repr():
    f = Box('f', 1, 2, lambda x: (x, x))
    assert "Box('f', 1, 2" in repr(f)


def test_Function_str():
    f = Function(2, 1, lambda x, y: x + y)
    assert 'Function(dom=2, cod=1,' in str(f)


def test_Function_call():
    f = Swap(2, 1)
    values = (2, 3)
    with raises(TypeError) as err:
        f(*values)
    assert str(err.value) == messages.expected_input_length(f, values)


def test_Function_then():
    f, g = Function(2, 1, lambda x, y: x + y), Function(1, 1, lambda x: x + 1)
    assert Function.id(2).then(*(f, g))(20, 21) == 42


def test_Function_then_err():
    f = Function(2, 1, lambda x, y: x + y)
    g = (lambda x: x, )
    with raises(TypeError) as err:
        f >> g
    assert str(err.value) == messages.type_err(Function, g)
    g = Function.id(2)
    with raises(AxiomError) as err:
        f >> g
    assert str(err.value) == messages.does_not_compose(f, g)


def test_Function_tensor():
    assert Function.id(3)(1, 2, 3)\
        == Function.id(0).tensor(*(3 * [Function.id(1)]))(1, 2, 3)


def test_Function_tensor_err():
    f = Function(2, 1, lambda x, y: x + y)
    g = (lambda x: x, )
    with raises(TypeError) as err:
        f @ g
    assert str(err.value) == messages.type_err(Function, g)
2.375
2
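The tests above exercise discopy's cartesian Functions. A small composition example in the same style; the >> and @ operators are assumed to behave as sequential and parallel composition, as the error-path tests above suggest, and the printed tensor output shape is an expectation rather than a verified result.

# Composition sketch in the style of test_cartesian.py.
from discopy.cartesian import Function

add = Function(2, 1, lambda x, y: x + y)   # two inputs, one output
inc = Function(1, 1, lambda x: x + 1)

# Sequential composition: feed the output of `add` into `inc`.
assert (add >> inc)(20, 21) == 42

# Parallel composition (tensor): run two copies side by side on four inputs.
pair = add @ add
print(pair(1, 2, 3, 4))  # expected to yield the two sums, e.g. (3, 7)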
source/browseMode.py
neal-hub/nvda-test
1
118
<filename>source/browseMode.py # A part of NonVisual Desktop Access (NVDA) # Copyright (C) 2007-2021 NV Access Limited, Babbage B.V., <NAME>, <NAME>, # <NAME>, Accessolutions, <NAME> # This file is covered by the GNU General Public License. # See the file COPYING for more details. from typing import Any, Callable, Union import os import itertools import collections import winsound import time import weakref import wx import core from logHandler import log import documentBase import review import scriptHandler import eventHandler import nvwave import queueHandler import gui import ui import cursorManager from scriptHandler import script, isScriptWaiting, willSayAllResume import aria import controlTypes from controlTypes import OutputReason import config import textInfos import braille import vision import speech from speech import sayAll import treeInterceptorHandler import inputCore import api import gui.guiHelper from gui.dpiScalingHelper import DpiScalingHelperMixinWithoutInit from NVDAObjects import NVDAObject import gui.contextHelp from abc import ABCMeta, abstractmethod import globalVars from typing import Optional def reportPassThrough(treeInterceptor,onlyIfChanged=True): """Reports the pass through mode if it has changed. @param treeInterceptor: The current Browse Mode treeInterceptor. @type treeInterceptor: L{BrowseModeTreeInterceptor} @param onlyIfChanged: if true reporting will not happen if the last reportPassThrough reported the same thing. @type onlyIfChanged: bool """ if not onlyIfChanged or treeInterceptor.passThrough != reportPassThrough.last: if config.conf["virtualBuffers"]["passThroughAudioIndication"]: sound = "focusMode.wav" if treeInterceptor.passThrough else "browseMode.wav" nvwave.playWaveFile(os.path.join(globalVars.appDir, "waves", sound)) else: if treeInterceptor.passThrough: # Translators: The mode to interact with controls in documents ui.message(_("Focus mode")) else: # Translators: The mode that presents text in a flat representation # that can be navigated with the cursor keys like in a text document ui.message(_("Browse mode")) reportPassThrough.last = treeInterceptor.passThrough reportPassThrough.last = False def mergeQuickNavItemIterators(iterators,direction="next"): """ Merges multiple iterators that emit L{QuickNavItem} objects, yielding them from first to last. They are sorted using min or max (__lt__ should be implemented on the L{QuickNavItem} objects). @param iters: the iterators you want to merge. @type iters: sequence of iterators that emit L{QuicknavItem} objects. @param direction: the direction these iterators are searching (e.g. next, previous) @type direction: string """ finder=min if direction=="next" else max curValues=[] # Populate a list with all iterators and their corisponding first value for it in iterators: try: val=next(it) except StopIteration: continue curValues.append((it,val)) # Until all iterators have been used up, # Find the first (minimum or maximum) of all the values, # emit that, and update the list with the next available value for the iterator whose value was emitted. while len(curValues)>0: first=finder(curValues,key=lambda x: x[1]) curValues.remove(first) it,val=first yield val try: newVal=next(it) except StopIteration: continue curValues.append((it,newVal)) class QuickNavItem(object, metaclass=ABCMeta): """ Emitted by L{BrowseModeTreeInterceptor._iterNodesByType}, this represents one of many positions in a browse mode document, based on the type of item being searched for (e.g. 
link, heading, table etc).""" itemType=None #: The type of items searched for (e.g. link, heading, table etc) label=None #: The label that should represent this item in the Elements list. isAfterSelection=False #: Is this item positioned after the caret in the document? Used by the elements list to place its own selection. def __init__(self,itemType,document): """ @param itemType: the type that was searched for (e.g. link, heading, table etc) @type itemType: string @param document: the browse mode document this item is a part of. @type document: L{BrowseModeTreeInterceptor} """ self.itemType=itemType self.document=document @abstractmethod def isChild(self,parent): """ Is this item a child of the given parent? This is used when representing items in a hierarchical tree structure, such as the Elements List. @param parent: the item of whom this item may be a child of. @type parent: L{QuickNavItem} @return: True if this item is a child, false otherwise. @rtype: bool """ raise NotImplementedError @abstractmethod def report(self,readUnit=None): """ Reports the contents of this item. @param readUnit: the optional unit (e.g. line, paragraph) that should be used to announce the item position when moved to. If not given, then the full sise of the item is used. @type readUnit: a L{textInfos}.UNIT_* constant. """ raise NotImplementedError @abstractmethod def moveTo(self): """ Moves the browse mode caret or focus to this item. """ raise NotImplementedError def activate(self): """ Activates this item's position. E.g. follows a link, presses a button etc. """ raise NotImplementedError def rename(self,newName): """ Renames this item with the new name. """ raise NotImplementedError @property def isRenameAllowed(self): return False class TextInfoQuickNavItem(QuickNavItem): """ Represents a quick nav item in a browse mode document who's positions are represented by a L{textInfos.TextInfo}. """ def __init__(self,itemType,document,textInfo): """ See L{QuickNavItem.__init__} for itemType and document argument definitions. @param textInfo: the textInfo position this item represents. @type textInfo: L{textInfos.TextInfo} """ self.textInfo=textInfo super(TextInfoQuickNavItem,self).__init__(itemType,document) def __lt__(self,other): return self.textInfo.compareEndPoints(other.textInfo,"startToStart")<0 @property def obj(self): return self.textInfo.basePosition if isinstance(self.textInfo.basePosition,NVDAObject) else None @property def label(self): return self.textInfo.text.strip() def isChild(self,parent): if parent.textInfo.isOverlapping(self.textInfo): return True return False def report(self,readUnit=None): info=self.textInfo # If we are dealing with a form field, ensure we don't read the whole content if it's an editable text. if self.itemType == "formField": if self.obj.role == controlTypes.Role.EDITABLETEXT: readUnit = textInfos.UNIT_LINE if readUnit: fieldInfo = info.copy() info.collapse() info.move(readUnit, 1, endPoint="end") if info.compareEndPoints(fieldInfo, "endToEnd") > 0: # We've expanded past the end of the field, so limit to the end of the field. 
info.setEndPoint(fieldInfo, "endToEnd") speech.speakTextInfo(info, reason=OutputReason.QUICKNAV) def activate(self): self.textInfo.obj._activatePosition(info=self.textInfo) def moveTo(self): if self.document.passThrough and getattr(self, "obj", False): if controlTypes.State.FOCUSABLE in self.obj.states: self.obj.setFocus() return self.document.passThrough = False reportPassThrough(self.document) info = self.textInfo.copy() info.collapse() self.document._set_selection(info, reason=OutputReason.QUICKNAV) @property def isAfterSelection(self): caret=self.document.makeTextInfo(textInfos.POSITION_CARET) return self.textInfo.compareEndPoints(caret, "startToStart") > 0 def _getLabelForProperties(self, labelPropertyGetter: Callable[[str], Optional[Any]]): """ Fetches required properties for this L{TextInfoQuickNavItem} and constructs a label to be shown in an elements list. This can be used by subclasses to implement the L{label} property. @Param labelPropertyGetter: A callable taking 1 argument, specifying the property to fetch. For example, if L{itemType} is landmark, the callable must return the landmark type when "landmark" is passed as the property argument. Alternative property names might be name or value. The callable must return None if the property doesn't exist. An expected callable might be get method on a L{Dict}, or "lambda property: getattr(self.obj, property, None)" for an L{NVDAObject}. """ content = self.textInfo.text.strip() if self.itemType == "heading": # Output: displayed text of the heading. return content labelParts = None name = labelPropertyGetter("name") if self.itemType == "landmark": landmark = aria.landmarkRoles.get(labelPropertyGetter("landmark")) # Example output: main menu; navigation labelParts = (name, landmark) else: role: Union[controlTypes.Role, int] = labelPropertyGetter("role") role = controlTypes.Role(role) roleText = role.displayString # Translators: Reported label in the elements list for an element which which has no name and value unlabeled = _("Unlabeled") realStates = labelPropertyGetter("states") labeledStates = " ".join(controlTypes.processAndLabelStates(role, realStates, OutputReason.FOCUS)) if self.itemType == "formField": if role in ( controlTypes.Role.BUTTON, controlTypes.Role.DROPDOWNBUTTON, controlTypes.Role.TOGGLEBUTTON, controlTypes.Role.SPLITBUTTON, controlTypes.Role.MENUBUTTON, controlTypes.Role.DROPDOWNBUTTONGRID, controlTypes.Role.TREEVIEWBUTTON ): # Example output: Mute; toggle button; pressed labelParts = (content or name or unlabeled, roleText, labeledStates) else: # Example output: Find a repository...; edit; has auto complete; NVDA labelParts = (name or unlabeled, roleText, labeledStates, content) elif self.itemType in ("link", "button"): # Example output: You have unread notifications; visited labelParts = (content or name or unlabeled, labeledStates) if labelParts: label = "; ".join(lp for lp in labelParts if lp) else: label = content return label class BrowseModeTreeInterceptor(treeInterceptorHandler.TreeInterceptor): scriptCategory = inputCore.SCRCAT_BROWSEMODE _disableAutoPassThrough = False APPLICATION_ROLES = (controlTypes.Role.APPLICATION, controlTypes.Role.DIALOG) def _get_currentNVDAObject(self): raise NotImplementedError def _get_currentFocusableNVDAObject(self): return self.makeTextInfo(textInfos.POSITION_CARET).focusableNVDAObjectAtStart def event_treeInterceptor_gainFocus(self): """Triggered when this browse mode interceptor gains focus. 
This event is only fired upon entering this treeInterceptor when it was not the current treeInterceptor before. This is different to L{event_gainFocus}, which is fired when an object inside this treeInterceptor gains focus, even if that object is in the same treeInterceptor. """ reportPassThrough(self) ALWAYS_SWITCH_TO_PASS_THROUGH_ROLES = frozenset({ controlTypes.Role.COMBOBOX, controlTypes.Role.EDITABLETEXT, controlTypes.Role.LIST, controlTypes.Role.LISTITEM, controlTypes.Role.SLIDER, controlTypes.Role.TABCONTROL, controlTypes.Role.MENUBAR, controlTypes.Role.POPUPMENU, controlTypes.Role.TREEVIEW, controlTypes.Role.TREEVIEWITEM, controlTypes.Role.SPINBUTTON, controlTypes.Role.TABLEROW, controlTypes.Role.TABLECELL, controlTypes.Role.TABLEROWHEADER, controlTypes.Role.TABLECOLUMNHEADER, }) SWITCH_TO_PASS_THROUGH_ON_FOCUS_ROLES = frozenset({ controlTypes.Role.LISTITEM, controlTypes.Role.RADIOBUTTON, controlTypes.Role.TAB, controlTypes.Role.MENUITEM, controlTypes.Role.RADIOMENUITEM, controlTypes.Role.CHECKMENUITEM, }) IGNORE_DISABLE_PASS_THROUGH_WHEN_FOCUSED_ROLES = frozenset({ controlTypes.Role.MENUITEM, controlTypes.Role.RADIOMENUITEM, controlTypes.Role.CHECKMENUITEM, controlTypes.Role.TABLECELL, }) def shouldPassThrough(self, obj, reason: Optional[OutputReason] = None): """Determine whether pass through mode should be enabled (focus mode) or disabled (browse mode) for a given object. @param obj: The object in question. @type obj: L{NVDAObjects.NVDAObject} @param reason: The reason for this query; one of the output reasons, or C{None} for manual pass through mode activation by the user. @return: C{True} if pass through mode (focus mode) should be enabled, C{False} if it should be disabled (browse mode). """ if reason and ( self.disableAutoPassThrough or (reason == OutputReason.FOCUS and not config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"]) or (reason == OutputReason.CARET and not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]) ): # This check relates to auto pass through and auto pass through is disabled, so don't change the pass through state. return self.passThrough if reason == OutputReason.QUICKNAV: return False states = obj.states role = obj.role if controlTypes.State.EDITABLE in states and controlTypes.State.UNAVAILABLE not in states: return True # Menus sometimes get focus due to menuStart events even though they don't report as focused/focusable. if not obj.isFocusable and controlTypes.State.FOCUSED not in states and role != controlTypes.Role.POPUPMENU: return False # many controls that are read-only should not switch to passThrough. # However, there are exceptions. if controlTypes.State.READONLY in states: # #13221: For Slack message lists, and the MS Edge downloads window, switch to passthrough # even though the list item and list are read-only, but focusable. if ( role == controlTypes.Role.LISTITEM and controlTypes.State.FOCUSED in states and obj.parent.role == controlTypes.Role.LIST and controlTypes.State.FOCUSABLE in obj.parent.states ): return True # Certain controls such as combo boxes and readonly edits are read-only but still interactive. # #5118: read-only ARIA grids should also be allowed (focusable table cells, rows and headers). 
if role not in ( controlTypes.Role.EDITABLETEXT, controlTypes.Role.COMBOBOX, controlTypes.Role.TABLEROW, controlTypes.Role.TABLECELL, controlTypes.Role.TABLEROWHEADER, controlTypes.Role.TABLECOLUMNHEADER ): return False # Any roles or states for which we always switch to passThrough if role in self.ALWAYS_SWITCH_TO_PASS_THROUGH_ROLES or controlTypes.State.EDITABLE in states: return True # focus is moving to this control. Perhaps after pressing tab or clicking a button that brings up a menu (via javascript) if reason == OutputReason.FOCUS: if role in self.SWITCH_TO_PASS_THROUGH_ON_FOCUS_ROLES: return True # If this is a focus change, pass through should be enabled for certain ancestor containers. # this is done last for performance considerations. Walking up the through the parents could be costly while obj and obj != self.rootNVDAObject: if obj.role == controlTypes.Role.TOOLBAR: return True obj = obj.parent return False def _get_shouldTrapNonCommandGestures(self): return config.conf['virtualBuffers']['trapNonCommandGestures'] def script_trapNonCommandGesture(self,gesture): winsound.PlaySound("default",1) singleLetterNavEnabled=True #: Whether single letter navigation scripts should be active (true) or if these letters should fall to the application. def getAlternativeScript(self,gesture,script): if self.passThrough or not gesture.isCharacter: return script if not self.singleLetterNavEnabled: return None if not script and self.shouldTrapNonCommandGestures: script=self.script_trapNonCommandGesture return script def script_toggleSingleLetterNav(self,gesture): if self.singleLetterNavEnabled: self.singleLetterNavEnabled=False # Translators: Reported when single letter navigation in browse mode is turned off. ui.message(_("Single letter navigation off")) else: self.singleLetterNavEnabled=True # Translators: Reported when single letter navigation in browse mode is turned on. ui.message(_("Single letter navigation on")) # Translators: the description for the toggleSingleLetterNavigation command in browse mode. script_toggleSingleLetterNav.__doc__=_("Toggles single letter navigation on and off. When on, single letter keys in browse mode jump to various kinds of elements on the page. When off, these keys are passed to the application") def _get_ElementsListDialog(self): return ElementsListDialog def _iterNodesByType(self,itemType,direction="next",pos=None): """ Yields L{QuickNavItem} objects representing the ordered positions in this document according to the type being searched for (e.g. link, heading, table etc). @param itemType: the type being searched for (e.g. link, heading, table etc) @type itemType: string @param direction: the direction in which to search (next, previous, up) @type direction: string @param pos: the position in the document from where to start the search. @type pos: Usually an L{textInfos.TextInfo} @raise NotImplementedError: This type is not supported by this BrowseMode implementation """ raise NotImplementedError def _iterNotLinkBlock(self, direction="next", pos=None): raise NotImplementedError def _quickNavScript(self,gesture, itemType, direction, errorMessage, readUnit): if itemType=="notLinkBlock": iterFactory=self._iterNotLinkBlock else: iterFactory=lambda direction,info: self._iterNodesByType(itemType,direction,info) info=self.selection try: item = next(iterFactory(direction, info)) except NotImplementedError: # Translators: a message when a particular quick nav command is not supported in the current document. 
ui.message(_("Not supported in this document")) return except StopIteration: ui.message(errorMessage) return # #8831: Report before moving because moving might change the focus, which # might mutate the document, potentially invalidating info if it is # offset-based. if not gesture or not willSayAllResume(gesture): item.report(readUnit=readUnit) item.moveTo() @classmethod def addQuickNav( cls, itemType: str, key: Optional[str], nextDoc: str, nextError: str, prevDoc: str, prevError: str, readUnit: Optional[str] = None ): """Adds a script for the given quick nav item. @param itemType: The type of item, I.E. "heading" "Link" ... @param key: The quick navigation key to bind to the script. Shift is automatically added for the previous item gesture. E.G. h for heading. If C{None} is provided, the script is unbound by default. @param nextDoc: The command description to bind to the script that yields the next quick nav item. @param nextError: The error message if there are no more quick nav items of type itemType in this direction. @param prevDoc: The command description to bind to the script that yields the previous quick nav item. @param prevError: The error message if there are no more quick nav items of type itemType in this direction. @param readUnit: The unit (one of the textInfos.UNIT_* constants) to announce when moving to this type of item. For example, only the line is read when moving to tables to avoid reading a potentially massive table. If None, the entire item will be announced. """ scriptSuffix = itemType[0].upper() + itemType[1:] scriptName = "next%s" % scriptSuffix funcName = "script_%s" % scriptName script = lambda self,gesture: self._quickNavScript(gesture, itemType, "next", nextError, readUnit) script.__doc__ = nextDoc script.__name__ = funcName script.resumeSayAllMode = sayAll.CURSOR.CARET setattr(cls, funcName, script) if key is not None: cls.__gestures["kb:%s" % key] = scriptName scriptName = "previous%s" % scriptSuffix funcName = "script_%s" % scriptName script = lambda self,gesture: self._quickNavScript(gesture, itemType, "previous", prevError, readUnit) script.__doc__ = prevDoc script.__name__ = funcName script.resumeSayAllMode = sayAll.CURSOR.CARET setattr(cls, funcName, script) if key is not None: cls.__gestures["kb:shift+%s" % key] = scriptName def script_elementsList(self, gesture): # We need this to be a modal dialog, but it mustn't block this script. def run(): gui.mainFrame.prePopup() d = self.ElementsListDialog(self) d.ShowModal() d.Destroy() gui.mainFrame.postPopup() wx.CallAfter(run) # Translators: the description for the Elements List command in browse mode. script_elementsList.__doc__ = _("Lists various types of elements in this document") script_elementsList.ignoreTreeInterceptorPassThrough = True def _activateNVDAObject(self, obj): """Activate an object in response to a user request. This should generally perform the default action or click on the object. @param obj: The object to activate. 
@type obj: L{NVDAObjects.NVDAObject} """ try: obj.doAction() except NotImplementedError: log.debugWarning("doAction not implemented") def _activatePosition(self, obj=None): if not obj: obj=self.currentNVDAObject if not obj: return if obj.role == controlTypes.Role.MATH: import mathPres try: return mathPres.interactWithMathMl(obj.mathMl) except (NotImplementedError, LookupError): pass return if self.shouldPassThrough(obj): obj.setFocus() self.passThrough = True reportPassThrough(self) elif obj.role == controlTypes.Role.EMBEDDEDOBJECT or obj.role in self.APPLICATION_ROLES: obj.setFocus() speech.speakObject(obj, reason=OutputReason.FOCUS) else: self._activateNVDAObject(obj) def script_activatePosition(self,gesture): if config.conf["virtualBuffers"]["autoFocusFocusableElements"]: self._activatePosition() else: self._focusLastFocusableObject(activatePosition=True) # Translators: the description for the activatePosition script on browseMode documents. script_activatePosition.__doc__ = _("Activates the current object in the document") def _focusLastFocusableObject(self, activatePosition=False): """Used when auto focus focusable elements is disabled to sync the focus to the browse mode cursor. When auto focus focusable elements is disabled, NVDA doesn't focus elements as the user moves the browse mode cursor. However, there are some cases where the user always wants to interact with the focus; e.g. if they press the applications key to open the context menu. In these cases, this method is called first to sync the focus to the browse mode cursor. """ obj = self.currentFocusableNVDAObject if obj!=self.rootNVDAObject and self._shouldSetFocusToObj(obj) and obj!= api.getFocusObject(): obj.setFocus() # We might be about to activate or pass through a key which will cause # this object to change (e.g. checking a check box). However, we won't # actually get the focus event until after the change has occurred. # Therefore, we must cache properties for speech before the change occurs. speech.speakObject(obj, OutputReason.ONLYCACHE) self._objPendingFocusBeforeActivate = obj if activatePosition: # Make sure we activate the object at the caret, which is not necessarily focusable. self._activatePosition() def script_passThrough(self,gesture): if not config.conf["virtualBuffers"]["autoFocusFocusableElements"]: self._focusLastFocusableObject() gesture.send() # Translators: the description for the passThrough script on browseMode documents. script_passThrough.__doc__ = _("Passes gesture through to the application") def script_disablePassThrough(self, gesture): if not self.passThrough or self.disableAutoPassThrough: return gesture.send() # #3215 ARIA menus should get the Escape key unconditionally so they can handle it without invoking browse mode first obj = api.getFocusObject() if obj and obj.role in self.IGNORE_DISABLE_PASS_THROUGH_WHEN_FOCUSED_ROLES: return gesture.send() self.passThrough = False self.disableAutoPassThrough = False reportPassThrough(self) script_disablePassThrough.ignoreTreeInterceptorPassThrough = True def _set_disableAutoPassThrough(self, state): # If the user manually switches to focus mode with NVDA+space, that enables # pass-through and disables auto pass-through. If auto focusing of focusable # elements is disabled, NVDA won't have synced the focus to the browse mode # cursor. However, since the user is switching to focus mode, they probably # want to interact with the focus, so sync the focus here. 
if ( state and not config.conf["virtualBuffers"]["autoFocusFocusableElements"] and self.passThrough ): self._focusLastFocusableObject() self._disableAutoPassThrough = state def _get_disableAutoPassThrough(self): return self._disableAutoPassThrough __gestures={ "kb:NVDA+f7": "elementsList", "kb:enter": "activatePosition", "kb:numpadEnter": "activatePosition", "kb:space": "activatePosition", "kb:NVDA+shift+space":"toggleSingleLetterNav", "kb:escape": "disablePassThrough", "kb:control+enter": "passThrough", "kb:control+numpadEnter": "passThrough", "kb:shift+enter": "passThrough", "kb:shift+numpadEnter": "passThrough", "kb:control+shift+enter": "passThrough", "kb:control+shift+numpadEnter": "passThrough", "kb:alt+enter": "passThrough", "kb:alt+numpadEnter": "passThrough", "kb:applications": "passThrough", "kb:shift+applications": "passThrough", "kb:shift+f10": "passThrough", } # Add quick navigation scripts. qn = BrowseModeTreeInterceptor.addQuickNav qn("heading", key="h", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading")) qn("heading1", key="1", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 1"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 1"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 1"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 1")) qn("heading2", key="2", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 2"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 2"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 2"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 2")) qn("heading3", key="3", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 3"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 3"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 3"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 3")) qn("heading4", key="4", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 4"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 4"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 4"), # Translators: Message presented when the browse mode element is not found. 
prevError=_("no previous heading at level 4")) qn("heading5", key="5", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 5"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 5"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 5"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 5")) qn("heading6", key="6", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next heading at level 6"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next heading at level 6"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous heading at level 6"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous heading at level 6")) qn("table", key="t", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next table"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next table"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous table"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous table"), readUnit=textInfos.UNIT_LINE) qn("link", key="k", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next link"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next link"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous link"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous link")) qn("visitedLink", key="v", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next visited link"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next visited link"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous visited link"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous visited link")) qn("unvisitedLink", key="u", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next unvisited link"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next unvisited link"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous unvisited link"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous unvisited link")) qn("formField", key="f", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next form field"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next form field"), # Translators: Input help message for a quick navigation command in browse mode. 
prevDoc=_("moves to the previous form field"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous form field")) qn("list", key="l", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next list"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next list"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous list"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous list"), readUnit=textInfos.UNIT_LINE) qn("listItem", key="i", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next list item"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next list item"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous list item"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous list item")) qn("button", key="b", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next button"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next button"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous button"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous button")) qn("edit", key="e", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next edit field"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next edit field"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous edit field"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous edit field"), readUnit=textInfos.UNIT_LINE) qn("frame", key="m", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next frame"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next frame"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous frame"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous frame"), readUnit=textInfos.UNIT_LINE) qn("separator", key="s", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next separator"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next separator"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous separator"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous separator")) qn("radioButton", key="r", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next radio button"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next radio button"), # Translators: Input help message for a quick navigation command in browse mode. 
prevDoc=_("moves to the previous radio button"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous radio button")) qn("comboBox", key="c", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next combo box"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next combo box"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous combo box"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous combo box")) qn("checkBox", key="x", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next check box"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next check box"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous check box"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous check box")) qn("graphic", key="g", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next graphic"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next graphic"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous graphic"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous graphic")) qn("blockQuote", key="q", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next block quote"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next block quote"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous block quote"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous block quote")) qn("notLinkBlock", key="n", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("skips forward past a block of links"), # Translators: Message presented when the browse mode element is not found. nextError=_("no more text after a block of links"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("skips backward past a block of links"), # Translators: Message presented when the browse mode element is not found. prevError=_("no more text before a block of links"), readUnit=textInfos.UNIT_LINE) qn("landmark", key="d", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next landmark"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next landmark"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous landmark"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous landmark"), readUnit=textInfos.UNIT_LINE) qn("embeddedObject", key="o", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next embedded object"), # Translators: Message presented when the browse mode element is not found. 
nextError=_("no next embedded object"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous embedded object"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous embedded object")) qn("annotation", key="a", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next annotation"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next annotation"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous annotation"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous annotation")) qn("error", key="w", # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next error"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next error"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous error"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous error")) qn( "article", key=None, # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next article"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next article"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous article"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous article") ) qn( "grouping", key=None, # Translators: Input help message for a quick navigation command in browse mode. nextDoc=_("moves to the next grouping"), # Translators: Message presented when the browse mode element is not found. nextError=_("no next grouping"), # Translators: Input help message for a quick navigation command in browse mode. prevDoc=_("moves to the previous grouping"), # Translators: Message presented when the browse mode element is not found. prevError=_("no previous grouping") ) del qn class ElementsListDialog( DpiScalingHelperMixinWithoutInit, gui.contextHelp.ContextHelpMixin, wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO ): helpId = "ElementsList" ELEMENT_TYPES = ( # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("link", _("Lin&ks")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("heading", _("&Headings")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("formField", _("&Form fields")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("button", _("&Buttons")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("landmark", _("Lan&dmarks")), ) Element = collections.namedtuple("Element", ("item", "parent")) lastSelectedElementType=0 def __init__(self, document): super().__init__( parent=gui.mainFrame, # Translators: The title of the browse mode Elements List dialog. 
title=_("Elements List") ) self.document = document mainSizer = wx.BoxSizer(wx.VERTICAL) contentsSizer = wx.BoxSizer(wx.VERTICAL) # Translators: The label of a group of radio buttons to select the type of element # in the browse mode Elements List dialog. child = wx.RadioBox(self, wx.ID_ANY, label=_("Type:"), choices=tuple(et[1] for et in self.ELEMENT_TYPES)) child.SetSelection(self.lastSelectedElementType) child.Bind(wx.EVT_RADIOBOX, self.onElementTypeChange) contentsSizer.Add(child, flag=wx.EXPAND) contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS) self.tree = wx.TreeCtrl( self, size=self.scaleSize((500, 300)), # height is chosen to ensure the dialog will fit on an 800x600 screen style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_LINES_AT_ROOT | wx.TR_SINGLE | wx.TR_EDIT_LABELS ) self.tree.Bind(wx.EVT_SET_FOCUS, self.onTreeSetFocus) self.tree.Bind(wx.EVT_CHAR, self.onTreeChar) self.tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.onTreeLabelEditBegin) self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.onTreeLabelEditEnd) self.treeRoot = self.tree.AddRoot("root") contentsSizer.Add(self.tree,flag=wx.EXPAND) contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS) # Translators: The label of an editable text field to filter the elements # in the browse mode Elements List dialog. filterText = _("Filter b&y:") labeledCtrl = gui.guiHelper.LabeledControlHelper(self, filterText, wx.TextCtrl) self.filterEdit = labeledCtrl.control self.filterEdit.Bind(wx.EVT_TEXT, self.onFilterEditTextChange) contentsSizer.Add(labeledCtrl.sizer) contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS) bHelper = gui.guiHelper.ButtonHelper(wx.HORIZONTAL) # Translators: The label of a button to activate an element in the browse mode Elements List dialog. # Beware not to set an accelerator that would collide with other controls in this dialog, such as an # element type radio label. self.activateButton = bHelper.addButton(self, label=_("Activate")) self.activateButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(True)) # Translators: The label of a button to move to an element # in the browse mode Elements List dialog. self.moveButton = bHelper.addButton(self, label=_("&Move to")) self.moveButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(False)) bHelper.addButton(self, id=wx.ID_CANCEL) contentsSizer.Add(bHelper.sizer, flag=wx.ALIGN_RIGHT) mainSizer.Add(contentsSizer, border=gui.guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL) mainSizer.Fit(self) self.SetSizer(mainSizer) self.tree.SetFocus() self.initElementType(self.ELEMENT_TYPES[self.lastSelectedElementType][0]) self.CentreOnScreen() def onElementTypeChange(self, evt): elementType=evt.GetInt() # We need to make sure this gets executed after the focus event. # Otherwise, NVDA doesn't seem to get the event. queueHandler.queueFunction(queueHandler.eventQueue, self.initElementType, self.ELEMENT_TYPES[elementType][0]) self.lastSelectedElementType=elementType def initElementType(self, elType): if elType in ("link","button"): # Links and buttons can be activated. self.activateButton.Enable() self.SetAffirmativeId(self.activateButton.GetId()) else: # No other element type can be activated. self.activateButton.Disable() self.SetAffirmativeId(self.moveButton.GetId()) # Gather the elements of this type. self._elements = [] self._initialElement = None parentElements = [] isAfterSelection=False for item in self.document._iterNodesByType(elType): # Find the parent element, if any. 
for parent in reversed(parentElements): if item.isChild(parent.item): break else: # We're not a child of this parent, so this parent has no more children and can be removed from the stack. parentElements.pop() else: # No parent found, so we're at the root. # Note that parentElements will be empty at this point, as all parents are no longer relevant and have thus been removed from the stack. parent = None element=self.Element(item,parent) self._elements.append(element) if not isAfterSelection: isAfterSelection=item.isAfterSelection if not isAfterSelection: # The element immediately preceding or overlapping the caret should be the initially selected element. # Since we have not yet passed the selection, use this as the initial element. try: self._initialElement = self._elements[-1] except IndexError: # No previous element. pass # This could be the parent of a subsequent element, so add it to the parents stack. parentElements.append(element) # Start with no filtering. self.filterEdit.ChangeValue("") self.filter("", newElementType=True) def filter(self, filterText, newElementType=False): # If this is a new element type, use the element nearest the cursor. # Otherwise, use the currently selected element. # #8753: wxPython 4 returns "invalid tree item" when the tree view is empty, so use initial element if appropriate. try: defaultElement = self._initialElement if newElementType else self.tree.GetItemData(self.tree.GetSelection()) except: defaultElement = self._initialElement # Clear the tree. self.tree.DeleteChildren(self.treeRoot) # Populate the tree with elements matching the filter text. elementsToTreeItems = {} defaultItem = None matched = False #Do case-insensitive matching by lowering both filterText and each element's text. filterText=filterText.lower() for element in self._elements: label=element.item.label if filterText and filterText not in label.lower(): continue matched = True parent = element.parent if parent: parent = elementsToTreeItems.get(parent) item = self.tree.AppendItem(parent or self.treeRoot, label) self.tree.SetItemData(item, element) elementsToTreeItems[element] = item if element == defaultElement: defaultItem = item self.tree.ExpandAll() if not matched: # No items, so disable the buttons. self.activateButton.Disable() self.moveButton.Disable() return # If there's no default item, use the first item in the tree. self.tree.SelectItem(defaultItem or self.tree.GetFirstChild(self.treeRoot)[0]) # Enable the button(s). # If the activate button isn't the default button, it is disabled for this element type and shouldn't be enabled here. if self.AffirmativeId == self.activateButton.Id: self.activateButton.Enable() self.moveButton.Enable() def onTreeSetFocus(self, evt): # Start with no search. self._searchText = "" self._searchCallLater = None evt.Skip() def onTreeChar(self, evt): key = evt.KeyCode if key == wx.WXK_RETURN: # The enter key should be propagated to the dialog and thus activate the default button, # but this is broken (wx ticket #3725). # Therefore, we must catch the enter key here. # Activate the current default button. evt = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_ANY) button = self.FindWindowById(self.AffirmativeId) if button.Enabled: button.ProcessEvent(evt) else: wx.Bell() elif key == wx.WXK_F2: item=self.tree.GetSelection() if item: selectedItemType=self.tree.GetItemData(item).item self.tree.EditLabel(item) evt.Skip() elif key >= wx.WXK_START or key == wx.WXK_BACK: # Non-printable character. 
self._searchText = "" evt.Skip() else: # Search the list. # We have to implement this ourselves, as tree views don't accept space as a search character. char = chr(evt.UnicodeKey).lower() # IF the same character is typed twice, do the same search. if self._searchText != char: self._searchText += char if self._searchCallLater: self._searchCallLater.Restart() else: self._searchCallLater = wx.CallLater(1000, self._clearSearchText) self.search(self._searchText) def onTreeLabelEditBegin(self,evt): item=self.tree.GetSelection() selectedItemType = self.tree.GetItemData(item).item if not selectedItemType.isRenameAllowed: evt.Veto() def onTreeLabelEditEnd(self,evt): selectedItemNewName=evt.GetLabel() item=self.tree.GetSelection() selectedItemType = self.tree.GetItemData(item).item selectedItemType.rename(selectedItemNewName) def _clearSearchText(self): self._searchText = "" def search(self, searchText): item = self.tree.GetSelection() if not item: # No items. return # First try searching from the current item. # Failing that, search from the first item. items = itertools.chain(self._iterReachableTreeItemsFromItem(item), self._iterReachableTreeItemsFromItem(self.tree.GetFirstChild(self.treeRoot)[0])) if len(searchText) == 1: # If only a single character has been entered, skip (search after) the current item. next(items) for item in items: if self.tree.GetItemText(item).lower().startswith(searchText): self.tree.SelectItem(item) return # Not found. wx.Bell() def _iterReachableTreeItemsFromItem(self, item): while item: yield item childItem = self.tree.GetFirstChild(item)[0] if childItem and self.tree.IsExpanded(item): # Has children and is reachable, so recurse. for childItem in self._iterReachableTreeItemsFromItem(childItem): yield childItem item = self.tree.GetNextSibling(item) def onFilterEditTextChange(self, evt): self.filter(self.filterEdit.GetValue()) evt.Skip() def onAction(self, activate): prevFocus = gui.mainFrame.prevFocus self.Close() # Save off the last selected element type on to the class so its used in initialization next time. self.__class__.lastSelectedElementType=self.lastSelectedElementType item = self.tree.GetSelection() item = self.tree.GetItemData(item).item if activate: item.activate() else: def move(): speech.cancelSpeech() # Avoid double announce if item.obj is about to gain focus. if not ( self.document.passThrough and getattr(item, "obj", False) and item.obj != prevFocus and controlTypes.State.FOCUSABLE in item.obj.states ): # #8831: Report before moving because moving might change the focus, which # might mutate the document, potentially invalidating info if it is # offset-based. item.report() item.moveTo() # We must use core.callLater rather than wx.CallLater to ensure that the callback runs within NVDA's core pump. # If it didn't, and it directly or indirectly called wx.Yield, it could start executing NVDA's core pump from within the yield, causing recursion. 
core.callLater(100, move) class BrowseModeDocumentTextInfo(textInfos.TextInfo): def _get_focusableNVDAObjectAtStart(self): try: item = next(self.obj._iterNodesByType("focusable", "up", self)) except StopIteration: return self.obj.rootNVDAObject if not item: return self.obj.rootNVDAObject return item.obj class BrowseModeDocumentTreeInterceptor(documentBase.DocumentWithTableNavigation,cursorManager.CursorManager,BrowseModeTreeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor): programmaticScrollMayFireEvent = False def __init__(self,obj): super(BrowseModeDocumentTreeInterceptor,self).__init__(obj) self._lastProgrammaticScrollTime = None self.documentConstantIdentifier = self.documentConstantIdentifier self._lastFocusObj = None self._objPendingFocusBeforeActivate = None self._hadFirstGainFocus = False self._enteringFromOutside = True # We need to cache this because it will be unavailable once the document dies. if not hasattr(self.rootNVDAObject.appModule, "_browseModeRememberedCaretPositions"): self.rootNVDAObject.appModule._browseModeRememberedCaretPositions = {} self._lastCaretPosition = None #: True if the last caret move was due to a focus change. self._lastCaretMoveWasFocus = False def terminate(self): if self.shouldRememberCaretPositionAcrossLoads and self._lastCaretPosition: try: self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier] = self._lastCaretPosition except AttributeError: # The app module died. pass def _get_currentNVDAObject(self): return self.makeTextInfo(textInfos.POSITION_CARET).NVDAObjectAtStart def event_treeInterceptor_gainFocus(self): doSayAll=False hadFirstGainFocus=self._hadFirstGainFocus if not hadFirstGainFocus: # This treeInterceptor is gaining focus for the first time. # Fake a focus event on the focus object, as the treeInterceptor may have missed the actual focus event. focus = api.getFocusObject() self.event_gainFocus(focus, lambda: focus.event_gainFocus()) if not self.passThrough: # We only set the caret position if in browse mode. # If in focus mode, the document must have forced the focus somewhere, # so we don't want to override it. initialPos = self._getInitialCaretPos() if initialPos: self.selection = self.makeTextInfo(initialPos) reportPassThrough(self) doSayAll=config.conf['virtualBuffers']['autoSayAllOnPageLoad'] self._hadFirstGainFocus = True if not self.passThrough: if doSayAll: speech.speakObjectProperties(self.rootNVDAObject, name=True, states=True, reason=OutputReason.FOCUS) sayAll.SayAllHandler.readText(sayAll.CURSOR.CARET) else: # Speak it like we would speak focus on any other document object. # This includes when entering the treeInterceptor for the first time: if not hadFirstGainFocus: speech.speakObject(self.rootNVDAObject, reason=OutputReason.FOCUS) else: # And when coming in from an outside object # #4069 But not when coming up from a non-rendered descendant. 
ancestors=api.getFocusAncestors() fdl=api.getFocusDifferenceLevel() try: tl=ancestors.index(self.rootNVDAObject) except ValueError: tl=len(ancestors) if fdl<=tl: speech.speakObject(self.rootNVDAObject, reason=OutputReason.FOCUS) info = self.selection if not info.isCollapsed: speech.speakPreselectedText(info.text) else: info.expand(textInfos.UNIT_LINE) speech.speakTextInfo(info, reason=OutputReason.CARET, unit=textInfos.UNIT_LINE) reportPassThrough(self) braille.handler.handleGainFocus(self) def event_caret(self, obj, nextHandler): if self.passThrough: nextHandler() def _activateLongDesc(self,controlField): """ Activates (presents) the long description for a particular field (usually a graphic). @param controlField: the field who's long description should be activated. This field is guaranteed to have states containing HASLONGDESC state. @type controlField: dict """ raise NotImplementedError def _activatePosition(self, obj=None, info=None): if info: obj=info.NVDAObjectAtStart if not obj: return super(BrowseModeDocumentTreeInterceptor,self)._activatePosition(obj=obj) def _set_selection(self, info, reason=OutputReason.CARET): super(BrowseModeDocumentTreeInterceptor, self)._set_selection(info) if isScriptWaiting() or not info.isCollapsed: return # Save the last caret position for use in terminate(). # This must be done here because the buffer might be cleared just before terminate() is called, # causing the last caret position to be lost. caret = info.copy() caret.collapse() self._lastCaretPosition = caret.bookmark review.handleCaretMove(caret) if reason == OutputReason.FOCUS: self._lastCaretMoveWasFocus = True focusObj = api.getFocusObject() if focusObj==self.rootNVDAObject: return else: self._lastCaretMoveWasFocus = False focusObj=info.focusableNVDAObjectAtStart obj=info.NVDAObjectAtStart if not obj: log.debugWarning("Invalid NVDAObjectAtStart") return if obj==self.rootNVDAObject: return obj.scrollIntoView() if self.programmaticScrollMayFireEvent: self._lastProgrammaticScrollTime = time.time() if focusObj: self.passThrough = self.shouldPassThrough(focusObj, reason=reason) if ( not eventHandler.isPendingEvents("gainFocus") and focusObj != self.rootNVDAObject and focusObj != api.getFocusObject() and self._shouldSetFocusToObj(focusObj) ): followBrowseModeFocus = config.conf["virtualBuffers"]["autoFocusFocusableElements"] if followBrowseModeFocus or self.passThrough: focusObj.setFocus() # Queue the reporting of pass through mode so that it will be spoken after the actual content. queueHandler.queueFunction(queueHandler.eventQueue, reportPassThrough, self) def _shouldSetFocusToObj(self, obj): """Determine whether an object should receive focus. Subclasses may extend or override this method. @param obj: The object in question. @type obj: L{NVDAObjects.NVDAObject} """ return obj.role not in self.APPLICATION_ROLES and obj.isFocusable and obj.role!=controlTypes.Role.EMBEDDEDOBJECT def script_activateLongDesc(self,gesture): info=self.makeTextInfo(textInfos.POSITION_CARET) info.expand("character") for field in reversed(info.getTextWithFields()): if isinstance(field,textInfos.FieldCommand) and field.command=="controlStart": states=field.field.get('states') if states and controlTypes.State.HASLONGDESC in states: self._activateLongDesc(field.field) break else: # Translators: the message presented when the activateLongDescription script cannot locate a long description to activate. 
ui.message(_("No long description")) # Translators: the description for the activateLongDescription script on browseMode documents. script_activateLongDesc.__doc__=_("Shows the long description at this position if one is found.") def event_caretMovementFailed(self, obj, nextHandler, gesture=None): if not self.passThrough or not gesture or not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]: return nextHandler() if gesture.mainKeyName in ("home", "end"): # Home, end, control+home and control+end should not disable pass through. return nextHandler() script = self.getScript(gesture) if not script: return nextHandler() # We've hit the edge of the focused control. # Therefore, move the virtual caret to the same edge of the field. info = self.makeTextInfo(textInfos.POSITION_CARET) info.expand(textInfos.UNIT_CONTROLFIELD) if gesture.mainKeyName in ("leftArrow", "upArrow", "pageUp"): info.collapse() else: info.collapse(end=True) info.move(textInfos.UNIT_CHARACTER, -1) info.updateCaret() scriptHandler.queueScript(script, gesture) currentExpandedControl=None #: an NVDAObject representing the control that has just been expanded with the collapseOrExpandControl script. def script_collapseOrExpandControl(self, gesture): if not config.conf["virtualBuffers"]["autoFocusFocusableElements"]: self._focusLastFocusableObject() oldFocus = api.getFocusObject() oldFocusStates = oldFocus.states gesture.send() if controlTypes.State.COLLAPSED in oldFocusStates: self.passThrough = True # When a control (such as a combo box) is expanded, we expect that its descendants will be classed as being outside the browseMode document. # We save off the expanded control so that the next focus event within the browseMode document can see if it is for the control, # and if so, it disables passthrough, as the control has obviously been collapsed again. self.currentExpandedControl=oldFocus elif not self.disableAutoPassThrough: self.passThrough = False reportPassThrough(self) def _tabOverride(self, direction): """Override the tab order if the virtual caret is not within the currently focused node. This is done because many nodes are not focusable and it is thus possible for the virtual caret to be unsynchronised with the focus. In this case, we want tab/shift+tab to move to the next/previous focusable node relative to the virtual caret. If the virtual caret is within the focused node, the tab/shift+tab key should be passed through to allow normal tab order navigation. Note that this method does not pass the key through itself if it is not overridden. This should be done by the calling script if C{False} is returned. @param direction: The direction in which to move. @type direction: str @return: C{True} if the tab order was overridden, C{False} if not. @rtype: bool """ if self._lastCaretMoveWasFocus: # #5227: If the caret was last moved due to a focus change, don't override tab. # This ensures that tabbing behaves as expected after tabbing hits an iframe document. return False focus = api.getFocusObject() try: focusInfo = self.makeTextInfo(focus) except: return False # We only want to override the tab order if the caret is not within the focused node. caretInfo=self.makeTextInfo(textInfos.POSITION_CARET) #Only check that the caret is within the focus for things that ar not documents #As for documents we should always override if focus.role!=controlTypes.Role.DOCUMENT or controlTypes.State.EDITABLE in focus.states: # Expand to one character, as isOverlapping() doesn't yield the desired results with collapsed ranges. 
caretInfo.expand(textInfos.UNIT_CHARACTER) if focusInfo.isOverlapping(caretInfo): return False # If we reach here, we do want to override tab/shift+tab if possible. # Find the next/previous focusable node. try: item = next(self._iterNodesByType("focusable", direction, caretInfo)) except StopIteration: return False obj=item.obj newInfo=item.textInfo if obj == api.getFocusObject(): # This node is already focused, so we need to move to and speak this node here. newCaret = newInfo.copy() newCaret.collapse() self._set_selection(newCaret, reason=OutputReason.FOCUS) if self.passThrough: obj.event_gainFocus() else: speech.speakTextInfo(newInfo, reason=OutputReason.FOCUS) else: # This node doesn't have the focus, so just set focus to it. The gainFocus event will handle the rest. obj.setFocus() return True def script_tab(self, gesture): if not self._tabOverride("next"): gesture.send() def script_shiftTab(self, gesture): if not self._tabOverride("previous"): gesture.send() def event_focusEntered(self,obj,nextHandler): if obj==self.rootNVDAObject: self._enteringFromOutside = True # Even if passThrough is enabled, we still completely drop focusEntered events here. # In order to get them back when passThrough is enabled, we replay them with the _replayFocusEnteredEvents method in event_gainFocus. # The reason for this is to ensure that focusEntered events are delayed until a focus event has had a chance to disable passthrough mode. # As in this case we would not want them. def _shouldIgnoreFocus(self, obj): """Determines whether focus on a given object should be ignored. @param obj: The object in question. @type obj: L{NVDAObjects.NVDAObject} @return: C{True} if focus on L{obj} should be ignored, C{False} otherwise. @rtype: bool """ return False def _postGainFocus(self, obj): """Executed after a gainFocus within the browseMode document. This will not be executed if L{event_gainFocus} determined that it should abort and call nextHandler. @param obj: The object that gained focus. @type obj: L{NVDAObjects.NVDAObject} """ def _replayFocusEnteredEvents(self): # We blocked the focusEntered events because we were in browse mode, # but now that we've switched to focus mode, we need to fire them. for parent in api.getFocusAncestors()[api.getFocusDifferenceLevel():]: try: parent.event_focusEntered() except: log.exception("Error executing focusEntered event: %s" % parent) def event_gainFocus(self, obj, nextHandler): enteringFromOutside=self._enteringFromOutside self._enteringFromOutside=False if not self.isReady: if self.passThrough: self._replayFocusEnteredEvents() nextHandler() return # If a control has been expanded by the collapseOrExpandControl script, and this focus event is for it, # disable passThrough and report the control, as the control has obviously been collapsed again. # Note that whether or not this focus event was for that control, the last expanded control is forgotten, so that only the next focus event for the browseMode document can handle the collapsed control. lastExpandedControl=self.currentExpandedControl self.currentExpandedControl=None if self.passThrough and obj==lastExpandedControl: self.passThrough=False reportPassThrough(self) nextHandler() return if enteringFromOutside and not self.passThrough and self._lastFocusObj==obj: # We're entering the document from outside (not returning from an inside object/application; #3145) # and this was the last non-root node with focus, so ignore this focus event. 
# Otherwise, if the user switches away and back to this document, the cursor will jump to this node. # This is not ideal if the user was positioned over a node which cannot receive focus. return if obj==self.rootNVDAObject: if self.passThrough: self._replayFocusEnteredEvents() return nextHandler() return if not self.passThrough and self._shouldIgnoreFocus(obj): return # If the previous focus object was removed, we might hit a false positive for overlap detection. # Track the previous focus target so that we can account for this scenario. previousFocusObjIsDefunct = False if self._lastFocusObj: try: states = self._lastFocusObj.states previousFocusObjIsDefunct = controlTypes.State.DEFUNCT in states except Exception: log.debugWarning( "Error fetching states when checking for defunct object. Treating object as defunct anyway.", exc_info=True ) previousFocusObjIsDefunct = True self._lastFocusObj=obj try: focusInfo = self.makeTextInfo(obj) except: # This object is not in the treeInterceptor, even though it resides beneath the document. # Automatic pass through should be enabled in certain circumstances where this occurs. if not self.passThrough and self.shouldPassThrough(obj, reason=OutputReason.FOCUS): self.passThrough=True reportPassThrough(self) self._replayFocusEnteredEvents() return nextHandler() #We only want to update the caret and speak the field if we're not in the same one as before caretInfo=self.makeTextInfo(textInfos.POSITION_CARET) # Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping. caretInfo.expand(textInfos.UNIT_CHARACTER) isOverlapping = focusInfo.isOverlapping(caretInfo) if not self._hadFirstGainFocus or not isOverlapping or (isOverlapping and previousFocusObjIsDefunct): # The virtual caret is not within the focus node. oldPassThrough=self.passThrough passThrough = self.shouldPassThrough(obj, reason=OutputReason.FOCUS) if not oldPassThrough and (passThrough or sayAll.SayAllHandler.isRunning()): # If pass-through is disabled, cancel speech, as a focus change should cause page reading to stop. # This must be done before auto-pass-through occurs, as we want to stop page reading even if pass-through will be automatically enabled by this focus change. speech.cancelSpeech() self.passThrough=passThrough if not self.passThrough: # We read the info from the browseMode document instead of the control itself. speech.speakTextInfo(focusInfo, reason=OutputReason.FOCUS) # However, we still want to update the speech property cache so that property changes will be spoken properly. speech.speakObject(obj, controlTypes.OutputReason.ONLYCACHE) # As we do not call nextHandler which would trigger the vision framework to handle gain focus, # we need to call it manually here. vision.handler.handleGainFocus(obj) else: # Although we are going to speak the object rather than textInfo content, we still need to silently speak the textInfo content so that the textInfo speech cache is updated correctly. # Not doing this would cause later browseMode speaking to either not speak controlFields it had entered, or speak controlField exits after having already exited. # See #7435 for a discussion on this. speech.speakTextInfo(focusInfo, reason=OutputReason.ONLYCACHE) self._replayFocusEnteredEvents() nextHandler() focusInfo.collapse() self._set_selection(focusInfo, reason=OutputReason.FOCUS) else: # The virtual caret was already at the focused node. 
if not self.passThrough: # This focus change was caused by a virtual caret movement, so don't speak the focused node to avoid double speaking. # However, we still want to update the speech property cache so that property changes will be spoken properly. speech.speakObject(obj, OutputReason.ONLYCACHE) if config.conf["virtualBuffers"]["autoFocusFocusableElements"]: # As we do not call nextHandler which would trigger the vision framework to handle gain focus, # we need to call it manually here. # Note: this is usually called after the caret movement. vision.handler.handleGainFocus(obj) elif ( self._objPendingFocusBeforeActivate and obj == self._objPendingFocusBeforeActivate and obj is not self._objPendingFocusBeforeActivate ): # With auto focus focusable elements disabled, when the user activates # an element (e.g. by pressing enter) or presses a key which we pass # through (e.g. control+enter), we call _focusLastFocusableObject. # However, the activation/key press might cause a property change # before we get the focus event, so NVDA's normal reporting of # changes to the focus won't pick it up. # The speech property cache on _objPendingFocusBeforeActivate reflects # the properties before the activation/key, so use that to speak any # changes. speech.speakObject( self._objPendingFocusBeforeActivate, OutputReason.CHANGE ) self._objPendingFocusBeforeActivate = None else: self._replayFocusEnteredEvents() return nextHandler() self._postGainFocus(obj) event_gainFocus.ignoreIsReady=True def _handleScrollTo( self, obj: Union[NVDAObject, textInfos.TextInfo], ) -> bool: """Handle scrolling the browseMode document to a given object in response to an event. Subclasses should call this from an event which indicates that the document has scrolled. @postcondition: The virtual caret is moved to L{obj} and the buffer content for L{obj} is reported. @param obj: The object to which the document should scroll. @return: C{True} if the document was scrolled, C{False} if not. @note: If C{False} is returned, calling events should probably call their nextHandler. """ if self.programmaticScrollMayFireEvent and self._lastProgrammaticScrollTime and time.time() - self._lastProgrammaticScrollTime < 0.4: # This event was probably caused by this browseMode document's call to scrollIntoView(). # Therefore, ignore it. Otherwise, the cursor may bounce back to the scroll point. # However, pretend we handled it, as we don't want it to be passed on to the object either. return True if isinstance(obj, NVDAObject): try: scrollInfo = self.makeTextInfo(obj) except (NotImplementedError, RuntimeError): return False elif isinstance(obj, textInfos.TextInfo): scrollInfo = obj.copy() else: raise ValueError(f"{obj} is not a supported type") #We only want to update the caret and speak the field if we're not in the same one as before caretInfo=self.makeTextInfo(textInfos.POSITION_CARET) # Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping. caretInfo.expand(textInfos.UNIT_CHARACTER) if not scrollInfo.isOverlapping(caretInfo): if scrollInfo.isCollapsed: scrollInfo.expand(textInfos.UNIT_LINE) speech.speakTextInfo(scrollInfo, reason=OutputReason.CARET) scrollInfo.collapse() self.selection = scrollInfo return True return False def _isNVDAObjectInApplication_noWalk(self, obj): """Determine whether a given object is within an application without walking ancestors. The base implementation simply checks whether the object has an application role. 
Subclasses can override this if they can provide a definite answer without needing to walk. For example, for virtual buffers, if the object is in the buffer, it definitely isn't in an application. L{_isNVDAObjectInApplication} calls this and walks to the next ancestor if C{None} is returned. @return: C{True} if definitely in an application, C{False} if definitely not in an application, C{None} if this can't be determined without walking ancestors. """ if ( # roles such as application and dialog should be treated as being within a "application" and therefore outside of the browseMode document. obj.role in self.APPLICATION_ROLES # Anything other than an editable text box inside a combo box should be # treated as being outside a browseMode document. or ( obj.role != controlTypes.Role.EDITABLETEXT and obj.container and obj.container.role == controlTypes.Role.COMBOBOX ) ): return True return None def _isNVDAObjectInApplication(self, obj): """Determine whether a given object is within an application. The object is considered to be within an application if it or one of its ancestors has an application role. This should only be called on objects beneath the treeInterceptor's root NVDAObject. @param obj: The object in question. @type obj: L{NVDAObjects.NVDAObject} @return: C{True} if L{obj} is within an application, C{False} otherwise. @rtype: bool """ # We cache the result for each object we walk. # There can be browse mode documents within other documents and the result might be different between these, # so the cache must be maintained on the TreeInterceptor rather than the object itself. try: cache = self._isInAppCache except AttributeError: # Create this lazily, as this method isn't used by all browse mode implementations. cache = self._isInAppCache = weakref.WeakKeyDictionary() objs = [] def doResult(result): # Cache this on descendants we've walked over. for obj in objs: cache[obj] = result return result while obj and obj != self.rootNVDAObject: inApp = cache.get(obj) if inApp is not None: # We found a cached result. return doResult(inApp) objs.append(obj) inApp = self._isNVDAObjectInApplication_noWalk(obj) if inApp is not None: return doResult(inApp) # We must walk ancestors. # Cache container. container = obj.container obj.container = container obj = container return doResult(False) def _get_documentConstantIdentifier(self): """Get the constant identifier for this document. This identifier should uniquely identify all instances (not just one instance) of a document for at least the current session of the hosting application. Generally, the document URL should be used. @return: The constant identifier for this document, C{None} if there is none. """ return None def _get_shouldRememberCaretPositionAcrossLoads(self): """Specifies whether the position of the caret should be remembered when this document is loaded again. This is useful when the browser remembers the scroll position for the document, but does not communicate this information via APIs. The remembered caret position is associated with this document using L{documentConstantIdentifier}. @return: C{True} if the caret position should be remembered, C{False} if not. @rtype: bool """ docConstId = self.documentConstantIdentifier # Return True if the URL indicates that this is probably a web browser document. # We do this check because we don't want to remember caret positions for email messages, etc. 
if isinstance(docConstId, str): protocols=("http", "https", "ftp", "ftps", "file") protocol=docConstId.split("://", 1)[0] return protocol in protocols return False def _getInitialCaretPos(self): """Retrieve the initial position of the caret after the buffer has been loaded. This position, if any, will be passed to L{makeTextInfo}. Subclasses should extend this method. @return: The initial position of the caret, C{None} if there isn't one. @rtype: TextInfo position """ if self.shouldRememberCaretPositionAcrossLoads: try: return self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier] except KeyError: pass return None def getEnclosingContainerRange(self, textRange): textRange = textRange.copy() textRange.collapse() try: item = next(self._iterNodesByType("container", "up", textRange)) except (NotImplementedError,StopIteration): try: item = next(self._iterNodesByType("landmark", "up", textRange)) except (NotImplementedError,StopIteration): return return item.textInfo def script_moveToStartOfContainer(self,gesture): info=self.makeTextInfo(textInfos.POSITION_CARET) info.expand(textInfos.UNIT_CHARACTER) container=self.getEnclosingContainerRange(info) if not container: # Translators: Reported when the user attempts to move to the start or end of a container # (list, table, etc.) but there is no container. ui.message(_("Not in a container")) return container.collapse() self._set_selection(container, reason=OutputReason.QUICKNAV) if not willSayAllResume(gesture): container.expand(textInfos.UNIT_LINE) speech.speakTextInfo(container, reason=OutputReason.FOCUS) script_moveToStartOfContainer.resumeSayAllMode = sayAll.CURSOR.CARET # Translators: Description for the Move to start of container command in browse mode. script_moveToStartOfContainer.__doc__=_("Moves to the start of the container element, such as a list or table") def script_movePastEndOfContainer(self,gesture): info=self.makeTextInfo(textInfos.POSITION_CARET) info.expand(textInfos.UNIT_CHARACTER) container=self.getEnclosingContainerRange(info) if not container: # Translators: Reported when the user attempts to move to the start or end of a container # (list, table, etc.) but there is no container. ui.message(_("Not in a container")) return container.collapse(end=True) docEnd=container.obj.makeTextInfo(textInfos.POSITION_LAST) if container.compareEndPoints(docEnd,"endToEnd")>=0: container=docEnd # Translators: a message reported when: # Review cursor is at the bottom line of the current navigator object. # Landing at the end of a browse mode document when trying to jump to the end of the current container. ui.message(_("Bottom")) self._set_selection(container, reason=OutputReason.QUICKNAV) if not willSayAllResume(gesture): container.expand(textInfos.UNIT_LINE) speech.speakTextInfo(container, reason=OutputReason.FOCUS) script_movePastEndOfContainer.resumeSayAllMode = sayAll.CURSOR.CARET # Translators: Description for the Move past end of container command in browse mode. script_movePastEndOfContainer.__doc__=_("Moves past the end of the container element, such as a list or table") NOT_LINK_BLOCK_MIN_LEN = 30 def _isSuitableNotLinkBlock(self, textRange): return len(textRange.text) >= self.NOT_LINK_BLOCK_MIN_LEN def _iterNotLinkBlock(self, direction="next", pos=None): links = self._iterNodesByType("link", direction=direction, pos=pos) # We want to compare each link against the next link. 
item1 = next(links, None) if item1 is None: return for item2 in links: # If the distance between the links is small, this is probably just a piece of non-link text within a block of links; e.g. an inactive link of a nav bar. if direction=="previous": textRange=item1.textInfo.copy() textRange.collapse() textRange.setEndPoint(item2.textInfo,"startToEnd") else: textRange=item2.textInfo.copy() textRange.collapse() textRange.setEndPoint(item1.textInfo,"startToEnd") if self._isSuitableNotLinkBlock(textRange): yield TextInfoQuickNavItem("notLinkBlock", self, textRange) item1=item2 __gestures={ "kb:NVDA+d": "activateLongDesc", "kb:alt+upArrow": "collapseOrExpandControl", "kb:alt+downArrow": "collapseOrExpandControl", "kb:tab": "tab", "kb:shift+tab": "shiftTab", "kb:shift+,": "moveToStartOfContainer", "kb:,": "movePastEndOfContainer", } @script( description=_( # Translators: the description for the toggleScreenLayout script. "Toggles on and off if the screen layout is preserved while rendering the document content" ), gesture="kb:NVDA+v", ) def script_toggleScreenLayout(self, gesture): # Translators: The message reported for not supported toggling of screen layout ui.message(_("Not supported in this document."))
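The _iterNotLinkBlock generator above pairs each link with the next one and only yields the text between them when it is long enough to count as a block. Below is a minimal standalone sketch of that pairing idea, not NVDA code: links are plain (start, end) integer offsets instead of TextInfo ranges, only the forward direction is shown, and the function name is made up for illustration.

# Simplified illustration only: NVDA's real implementation walks TextInfo ranges
# obtained from _iterNodesByType("link", ...); here a link is just a (start, end)
# pair of character offsets and only the "next" direction is handled.
NOT_LINK_BLOCK_MIN_LEN = 30

def iter_not_link_blocks(link_spans):
    prev = None
    for span in link_spans:
        if prev is not None:
            gap_start, gap_end = prev[1], span[0]
            if gap_end - gap_start >= NOT_LINK_BLOCK_MIN_LEN:
                # A long run of non-link text between two links: treat it as a block.
                yield (gap_start, gap_end)
        prev = span

# Two nav-bar links close together, then a long text run before the next link:
print(list(iter_not_link_blocks([(0, 10), (12, 20), (80, 95)])))  # [(20, 80)]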
1.828125
2
qiskit_metal/qlibrary/qubits/Transmon_Interdigitated.py
PatrickSJacobs/qiskit-metal
0
119
# -*- coding: utf-8 -*-

# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

#from math import *
from math import sin, cos
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core.base import QComponent
import numpy as np

#from ... import config
#if not config.is_building_docs():
# from qiskit_metal import is_true


class TransmonInterdigitated(QComponent):
    """
    The base "TransmonInterdigitated" inherits the "QComponent" class.

    This creates a transmon pocket with two large pads connected by a Josephson
    junction. Both pads have four interdigitated "fingers" which increase the
    capacitance of the structure. There are three coupling capacitor pads with
    qpins defined; these can be connected to other structures in a design using
    CPWs.

    Default Options:
        * pad_width: '1000um' -- width of the large rectangular pads on either
          side of the junction
        * pad_height: '300um' -- height of the large rectangular pads on either
          side of the junction
        * finger_width: '50um' -- width of the "finger" on either side of the junction
        * finger_height: '100um' -- height of the "finger" on the side of the junction
        * finger_space: '50um' -- height of the Josephson Junction (equivalently,
          space between two fingers)
        * pad_pos_x: '0um' -- the internal coordinate defining the center of the
          bottom rectangular pad
        * pad_pos_y: '0um' -- the internal coordinate defining the center of the
          bottom rectangular pad
        * comb_width: '50um' -- the width of the four interdigitated combs
          connected to either pad
        * comb_space_vert: '50um' -- the space between the edge of a comb and the
          edge of the opposite rectangular pad
        * comb_space_hor: '50um' -- the space between adjacent interdigitated
          comb structures
        * jj_width: '20um' -- the width of the Josephson Junction located between
          the two fingers of the device
        * cc_space: '50um' -- the space between the lower rectangular pad and the
          coupling capacitor below it
        * cc_width: '100um' -- the width of the coupling capacitor located below
          the bottom rectangular pad
        * cc_height: '100um' -- the height of the coupling capacitor located below
          the bottom rectangular pad
        * cc_topleft_space: '50um' -- the space between the upper rectangular pad
          and the top left coupling capacitor
        * cc_topleft_width: '100um' -- the width of the top left coupling capacitor pad
        * cc_topleft_height: '100um' -- the height of the top left coupling capacitor pad
        * cc_topright_space: '50um' -- the space between the upper rectangular pad
          and the top right coupling capacitor
        * cc_topright_width: '100um' -- the width of the top right coupling capacitor pad
        * cc_topright_height: '100um' -- the height of the top right coupling capacitor pad
        * position_x: '0um' -- the x-coordinate defining the center of the
          transmon pocket on the chip
        * position_y: '0um' -- the y-coordinate defining the center of the
          transmon pocket on the chip
        * rotation: '0.0' -- the angle at which the entire structure is rotated
        * rotation_top_pad: '180' -- internal coordinate defining the angle of
          rotation between top and bottom pads
        * layer: '1' -- all objects are drawn assuming they are part of the same
          layer on the chip
    """

    # Default drawing options
    default_options = Dict(pad_width='1000um',
                           pad_height='300um',
                           finger_width='50um',
                           finger_height='100um',
                           finger_space='50um',
                           pad_pos_x='0um',
                           pad_pos_y='0um',
                           comb_width='50um',
                           comb_space_vert='50um',
                           comb_space_hor='50um',
                           jj_width='20um',
                           cc_space='50um',
                           cc_width='100um',
                           cc_height='100um',
                           cc_topleft_space='50um',
                           cc_topleft_width='100um',
                           cc_topleft_height='100um',
                           cc_topright_space='50um',
                           cc_topright_width='100um',
                           cc_topright_height='100um',
                           position_x='0um',
                           position_y='0um',
                           rotation='0.0',
                           rotation_top_pad='180',
                           layer='1')
    """Default drawing options"""

    # Name prefix of component, if user doesn't provide name
    component_metadata = Dict(short_name='component')
    """Component metadata"""

    def make(self):
        """Convert self.options into QGeometry."""

        p = self.parse_options()  # Parse the string options into numbers

        # draw the lower pad as a rectangle
        pad_lower = draw.rectangle(p.pad_width, p.pad_height, p.pad_pos_x,
                                   p.pad_pos_y)

        # draw the lower finger as a rectangle
        finger_lower = draw.rectangle(
            p.finger_width, p.finger_height, p.pad_pos_x,
            p.pad_pos_y + 0.49999 * (p.pad_height) + 0.49999 * (p.finger_height))

        # draw the Josephson Junction
        rect_jj = draw.rectangle(
            p.jj_width, p.finger_space, p.pad_pos_x,
            0.5 * (p.pad_height) + p.finger_height + 0.5 * (p.finger_space))

        # draw the first comb to the right of the lower finger as a rectangle
        comb1_lower = draw.rectangle(
            p.comb_width,
            (2 * p.finger_height + p.finger_space - p.comb_space_vert),
            (0.5 * p.finger_width + p.comb_space_hor + 0.5 * p.comb_width),
            (0.5 * p.pad_height + 0.5 *
             (p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))

        # draw the second comb to the right of the lower finger by translating the first comb
        comb2_lower = draw.translate(comb1_lower,
                                     2.0 * (p.comb_space_hor + p.comb_width),
                                     0.0)

        # draw the first comb to the left of the lower finger
        comb3_lower = draw.rectangle(
            p.comb_width,
            (2 * p.finger_height + p.finger_space - p.comb_space_vert),
            (-0.5 * p.finger_width - 2.0 * p.comb_space_hor - 1.5 * p.comb_width),
            (0.5 * p.pad_height + 0.5 *
             (p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))

        # draw the second comb to the left of the lower finger
        comb4_lower = draw.translate(comb3_lower,
                                     -2.0 * (p.comb_space_hor + p.comb_width),
                                     0.0)

        coupling_capacitor = draw.rectangle(
            p.cc_width, p.cc_height, p.pad_pos_x,
            p.pad_pos_y - 0.5 * (p.pad_height) - p.cc_space - 0.5 * p.cc_height)

        cc_topleft = draw.rectangle(
            p.cc_topleft_width, p.cc_topleft_height,
            p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
            p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
            p.finger_space + p.cc_topleft_space + 0.5 * p.cc_topleft_height)

        cc_topright = draw.translate(
            cc_topleft,
            p.pad_width - 0.5 * p.cc_topleft_width - 0.5 * p.cc_topright_width,
            0.0)

        # merge the bottom elements
        bottom = draw.union(pad_lower, finger_lower, comb1_lower, comb2_lower,
                            comb3_lower, comb4_lower)

        # create the top portion of the comb by translating and rotating
        # the bottom portion of the comb
        top = draw.translate(bottom, 0.0, p.pad_height + p.finger_space)
        top = draw.rotate(top, p.rotation_top_pad)

        # merge everything into a single design
        design = draw.union(bottom, top, rect_jj, coupling_capacitor,
                            cc_topleft, cc_topright)

        # draw the transmon pocket bounding box
        pocket = draw.rectangle(1.5 * p.pad_width, 5.0 * p.pad_height)

        # the origin is originally set to the middle of the lower pad.
        # Let's move it to the center of the JJ.
        design = draw.translate(
            design, 0.0,
            -0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)

        # now translate the final structure according to the user input
        design = draw.rotate(design, p.rotation, origin=(0, 0))
        design = draw.translate(design, p.position_x, p.position_y)

        pocket = draw.rotate(pocket, p.rotation, origin=(0, 0))
        pocket = draw.translate(pocket, p.position_x, p.position_y)

        geom = {'design': design}
        geom_pocket = {'pocket': pocket}
        self.add_qgeometry('poly', geom, layer=p.layer, subtract=False)
        self.add_qgeometry('poly', geom_pocket, layer=p.layer, subtract=True)

        ###################################################################
        # Add Qpin connections for coupling capacitors

        # define a function that both rotates and translates the
        # qpin coordinates
        def qpin_rotate_translate(x):
            """
            This function rotates the coordinates of the three qpins
            according to the user inputs for "position_x", "position_y"
            and "rotation".
            """
            y = list(x)
            z = [0.0, 0.0]
            z[0] = y[0] * cos(p.rotation * 3.14159 / 180) - y[1] * sin(
                p.rotation * 3.14159 / 180)
            z[1] = y[0] * sin(p.rotation * 3.14159 / 180) + y[1] * cos(
                p.rotation * 3.14159 / 180)
            z[0] = z[0] + p.position_x
            z[1] = z[1] + p.position_y
            x = (z[0], z[1])
            return x

        # Add Qpin connections for the bottom coupling capacitor
        qp1a = (0.0,
                -0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
        qp1b = (0.0, -0.5 * p.pad_height - p.cc_space - p.cc_height -
                0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)

        # rotate and translate the qpin coordinates
        qp1a = qpin_rotate_translate(qp1a)
        qp1b = qpin_rotate_translate(qp1b)

        self.add_pin('pin1',
                     points=np.array([qp1a, qp1b]),
                     width=0.01,
                     input_as_norm=True)

        # Add Qpin connections for top left coupling capacitor
        qp2a = (p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
                p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
                p.finger_space + p.cc_topleft_space +
                0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
                p.finger_height - 0.5 * p.finger_space)
        qp2b = (p.pad_pos_x - 0.5 * p.pad_width,
                p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
                p.finger_space + p.cc_topleft_space +
                0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
                p.finger_height - 0.5 * p.finger_space)

        qp2a = qpin_rotate_translate(qp2a)
        qp2b = qpin_rotate_translate(qp2b)

        self.add_pin('pin2',
                     points=np.array([qp2a, qp2b]),
                     width=0.01,
                     input_as_norm=True)

        # Add Qpin connections for top right coupling capacitor
        qp3a = (p.pad_pos_x + 0.5 * p.pad_width - 0.5 * p.cc_topleft_width,
                p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
                p.finger_space + p.cc_topleft_space +
                0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
                p.finger_height - 0.5 * p.finger_space)
        qp3b = (p.pad_pos_x + 0.5 * p.pad_width,
                p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
                p.finger_space + p.cc_topleft_space +
                0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
                p.finger_height - 0.5 * p.finger_space)

        qp3a = qpin_rotate_translate(qp3a)
        qp3b = qpin_rotate_translate(qp3b)

        self.add_pin('pin3',
                     points=np.array([qp3a, qp3b]),
                     width=0.01,
                     input_as_norm=True)
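A minimal usage sketch for the component defined above. It assumes the standard qiskit-metal planar-design workflow (DesignPlanar, rebuild, and optionally MetalGUI); the component name 'Q1' and the option values are illustrative only, and the option names come from the component's default_options.

# Hedged usage sketch: assumes qiskit-metal's DesignPlanar/MetalGUI workflow;
# this is not part of the component file above.
from qiskit_metal import designs, MetalGUI

design = designs.DesignPlanar()
design.overwrite_enabled = True  # convenient while iterating on a layout

# Option names are taken from TransmonInterdigitated.default_options;
# the values chosen here are arbitrary examples.
q1 = TransmonInterdigitated(design,
                            'Q1',
                            options=dict(position_x='0.0mm',
                                         position_y='0.0mm',
                                         rotation='90'))

design.rebuild()
gui = MetalGUI(design)  # optional: inspect the generated geometry interactively
gui.rebuild()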
2.71875
3
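As a side note on the qpin_rotate_translate helper in the component above: it expands the rotation by hand and uses 3.14159 as an approximation of pi. The same pin transform can be written with numpy's radians and a 2x2 rotation matrix; the standalone sketch below is only an illustration of that equivalence (the function name and example values are made up) and is not part of the component.

# Equivalent standalone formulation of the pin rotation + translation above,
# using numpy instead of a hand-coded approximation of pi.
import numpy as np

def rotate_translate(point, rotation_deg, dx, dy):
    # Rotate `point` by `rotation_deg` degrees about the origin, then shift by (dx, dy).
    theta = np.radians(rotation_deg)
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),  np.cos(theta)]])
    x, y = rot @ np.asarray(point, dtype=float)
    return (x + dx, y + dy)

# A point one unit along x, rotated 90 degrees and shifted by (0.5, 0):
print(rotate_translate((1.0, 0.0), 90.0, 0.5, 0.0))  # approximately (0.5, 1.0)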
sqlova/model/nl2sql/wikisql_models.py
guotong1988/Rule-SQL
15
120
# Copyright 2019-present NAVER Corp. # Apache License v2.0 # <NAME> import os, json from copy import deepcopy from matplotlib.pylab import * import torch import torch.nn as nn import torch.nn.functional as F device = torch.device("cuda" if torch.cuda.is_available() else "cpu") from sqlova.utils.utils import topk_multi_dim from sqlova.utils.utils_wikisql import * class Seq2SQL_v1(nn.Module): def __init__(self, input_size, hidden_size, num_layer, dropout, number_cond_ops, number_agg_ops, old=False): super(Seq2SQL_v1, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layer = num_layer self.dropout = dropout self.max_where_number = 4 self.number_cond_ops = number_cond_ops self.number_agg_ops = number_agg_ops self.select_column_predict = SelectColumnPredict(input_size, hidden_size, num_layer, dropout) self.select_agg_predict = SelectAggPredict(input_size, hidden_size, num_layer, dropout, number_agg_ops, old=old) self.where_number_predict = WhereNumberPredict(input_size, hidden_size, num_layer, dropout) self.wcp = WhereColumnPredict(input_size, hidden_size, num_layer, dropout) self.wop = WhereOpPredict(input_size, hidden_size, num_layer, dropout, number_cond_ops) self.wvp = WhereValuePredict_startend(input_size, hidden_size, num_layer, dropout, number_cond_ops, old=old) # start-end-search-discriminative model # emb_question, [16,26,1536] # len_question, [16] # emb_header, [102,12,1536] # len_header_token, [102] # number_header, [16] def forward(self, emb_question, len_question, emb_header, len_header_token, number_header, g_sc=None, g_sa=None, g_wn=None, g_wc=None, g_wo=None, g_wvi=None, show_p_sc=False, show_p_sa=False, show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False): # sc s_sc,s_sc_softmax = self.select_column_predict(emb_question, len_question, emb_header, len_header_token, number_header, show_p_sc=show_p_sc) if g_sc: pr_sc = g_sc else: pr_sc = pred_sc(s_sc) # sa s_sa,s_sa_softmax = self.select_agg_predict(emb_question, len_question, emb_header, len_header_token, number_header, pr_sc, show_p_sa=show_p_sa) if g_sa: # it's not necessary though. pr_sa = g_sa else: pr_sa = pred_sa(s_sa) # wn s_wn,s_wn_softmax = self.where_number_predict(emb_question, len_question, emb_header, len_header_token, number_header, show_p_wn=show_p_wn) if g_wn: pr_wn = g_wn else: pr_wn = pred_wn(s_wn) # wc s_wc,s_wc_softmax = self.wcp(emb_question, len_question, emb_header, len_header_token, number_header, show_p_wc=show_p_wc, penalty=True) if g_wc: pr_wc = g_wc else: pr_wc = pred_wherecolumn(pr_wn, s_wc) # wo s_wo,s_wo_softmax = self.wop(emb_question, len_question, emb_header, len_header_token, number_header, wn=pr_wn, wc=pr_wc, show_p_wo=show_p_wo) if g_wo: pr_wo = g_wo else: pr_wo = pred_wo(pr_wn, s_wo) # wv s_wv,s_wv_softmax = self.wvp(emb_question, len_question, emb_header, len_header_token, number_header, wn=pr_wn, wc=pr_wc, wo=pr_wo, show_p_wv=show_p_wv) return s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, s_sc_softmax, s_sa_softmax, s_wn_softmax, s_wc_softmax, s_wo_softmax, s_wv_softmax def beam_forward(self, emb_question, len_question, emb_header, len_header_token, l_header, engine, tb, nlu_t, nlu_wp_t, wp_to_wh_index, nlu, beam_size=4, show_p_sc=False, show_p_sa=False, show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False): """ Execution-guided beam decoding. 
""" # sc s_sc,_ = self.select_column_predict(emb_question, len_question, emb_header, len_header_token, l_header, show_p_sc=show_p_sc) prob_sc = F.softmax(s_sc, dim=-1) bS, mcL = s_sc.shape # minimum_header_length = min(l_header) # beam_size = minimum_header_length if beam_size > minimum_header_length else beam_size # sa # Construct all possible sc_sa_score prob_sc_sa = torch.zeros([bS, beam_size, self.number_agg_ops]).to(device) prob_sca = torch.zeros_like(prob_sc_sa).to(device) # get the top-k indices. pr_sc_beam = [B, beam_size] pr_sc_beam = pred_sc_beam(s_sc, beam_size) # calculate and predict s_sa. for i_beam in range(beam_size): pr_sc = list( array(pr_sc_beam)[:,i_beam] ) s_sa,_ = self.select_agg_predict(emb_question, len_question, emb_header, len_header_token, l_header, pr_sc, show_p_sa=show_p_sa) prob_sa = F.softmax(s_sa, dim=-1) prob_sc_sa[:, i_beam, :] = prob_sa prob_sc_selected = prob_sc[range(bS), pr_sc] # [B] prob_sca[:,i_beam,:] = (prob_sa.t() * prob_sc_selected).t() # [mcL, B] * [B] -> [mcL, B] (element-wise multiplication) # [mcL, B] -> [B, mcL] # Calculate the dimension of tensor # tot_dim = len(prob_sca.shape) # First flatten to 1-d idxs = topk_multi_dim(torch.tensor(prob_sca), n_topk=beam_size, batch_exist=True) # Now as sc_idx is already sorted, re-map them properly. idxs = remap_sc_idx(idxs, pr_sc_beam) # [sc_beam_idx, sa_idx] -> [sc_idx, sa_idx] idxs_arr = array(idxs) # [B, beam_size, remainig dim] # idxs[b][0] gives first probable [sc_idx, sa_idx] pairs. # idxs[b][1] gives of second. # Calculate prob_sca, a joint probability beam_idx_sca = [0] * bS beam_meet_the_final = [False] * bS while True: pr_sc = idxs_arr[range(bS),beam_idx_sca,0] pr_sa = idxs_arr[range(bS),beam_idx_sca,1] # map index properly check = check_sc_sa_pairs(tb, pr_sc, pr_sa) if sum(check) == bS: break else: for b, check1 in enumerate(check): if not check1: # wrong pair beam_idx_sca[b] += 1 if beam_idx_sca[b] >= beam_size: beam_meet_the_final[b] = True beam_idx_sca[b] -= 1 else: beam_meet_the_final[b] = True if sum(beam_meet_the_final) == bS: break # Now pr_sc, pr_sa are properly predicted. pr_sc_best = list(pr_sc) pr_sa_best = list(pr_sa) # Now, Where-clause beam search. s_wn,_ = self.where_number_predict(emb_question, len_question, emb_header, len_header_token, l_header, show_p_wn=show_p_wn) prob_wn = F.softmax(s_wn, dim=-1).detach().to('cpu').numpy() # Found "executable" most likely 4(=max_num_of_conditions) where-clauses. # wc s_wc,_ = self.wcp(emb_question, len_question, emb_header, len_header_token, l_header, show_p_wc=show_p_wc, penalty=True) prob_wc = F.sigmoid(s_wc).detach().to('cpu').numpy() # pr_wc_sorted_by_prob = pred_wc_sorted_by_prob(s_wc) # get max_wn # of most probable columns & their prob. 
pr_wn_max = [self.max_where_number] * bS pr_wc_max = pred_wherecolumn(pr_wn_max, s_wc) # if some column do not have executable where-claouse, omit that column prob_wc_max = zeros([bS, self.max_where_number]) for b, pr_wc_max1 in enumerate(pr_wc_max): prob_wc_max[b,:] = prob_wc[b,pr_wc_max1] # get most probable max_wn where-clouses # wo s_wo_max,_ = self.wop(emb_question, len_question, emb_header, len_header_token, l_header, wn=pr_wn_max, wc=pr_wc_max, show_p_wo=show_p_wo) prob_wo_max = F.softmax(s_wo_max, dim=-1).detach().to('cpu').numpy() # [B, max_wn, n_cond_op] pr_wvi_beam_op_list = [] prob_wvi_beam_op_list = [] for i_op in range(self.number_cond_ops - 1): pr_wo_temp = [[i_op] * self.max_where_number] * bS # wv s_wv,_ = self.wvp(emb_question, len_question, emb_header, len_header_token, l_header, wn=pr_wn_max, wc=pr_wc_max, wo=pr_wo_temp, show_p_wv=show_p_wv) prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy() # prob_wv pr_wvi_beam, prob_wvi_beam = pred_wvi_se_beam(self.max_where_number, s_wv, beam_size) pr_wvi_beam_op_list.append(pr_wvi_beam) prob_wvi_beam_op_list.append(prob_wvi_beam) # pr_wvi_beam = [B, max_wn, k_logit**2 [st, ed] paris] # pred_wv_beam # Calculate joint probability of where-clause # prob_w = [batch, wc, wo, wv] = [B, max_wn, n_cond_op, n_pairs] n_wv_beam_pairs = prob_wvi_beam.shape[2] prob_w = zeros([bS, self.max_where_number, self.number_cond_ops - 1, n_wv_beam_pairs]) for b in range(bS): for i_wn in range(self.max_where_number): for i_op in range(self.number_cond_ops - 1): # do not use final one for i_wv_beam in range(n_wv_beam_pairs): # i_wc = pr_wc_max[b][i_wn] # already done p_wc = prob_wc_max[b, i_wn] p_wo = prob_wo_max[b, i_wn, i_op] p_wv = prob_wvi_beam_op_list[i_op][b, i_wn, i_wv_beam] prob_w[b, i_wn, i_op, i_wv_beam] = p_wc * p_wo * p_wv # Perform execution guided decoding conds_max = [] prob_conds_max = [] # while len(conds_max) < self.max_wn: idxs = topk_multi_dim(torch.tensor(prob_w), n_topk=beam_size, batch_exist=True) # idxs = [B, i_wc_beam, i_op, i_wv_pairs] # Construct conds1 for b, idxs1 in enumerate(idxs): conds_max1 = [] prob_conds_max1 = [] for i_wn, idxs11 in enumerate(idxs1): i_wc = pr_wc_max[b][idxs11[0]] i_op = idxs11[1] wvi = pr_wvi_beam_op_list[i_op][b][idxs11[0]][idxs11[2]] # get wv_str temp_pr_wv_str, _ = convert_pred_wvi_to_string([[wvi]], [nlu_t[b]], [nlu_wp_t[b]], [wp_to_wh_index[b]], [nlu[b]]) merged_wv11 = merge_wv_t1_eng(temp_pr_wv_str[0][0], nlu[b]) conds11 = [i_wc, i_op, merged_wv11] prob_conds11 = prob_w[b, idxs11[0], idxs11[1], idxs11[2] ] # test execution # print(nlu[b]) # print(tb[b]['id'], tb[b]['types'], pr_sc[b], pr_sa[b], [conds11]) pr_ans = engine.execute(tb[b]['id'], pr_sc[b], pr_sa[b], [conds11]) if bool(pr_ans): # pr_ans is not empty! conds_max1.append(conds11) prob_conds_max1.append(prob_conds11) conds_max.append(conds_max1) prob_conds_max.append(prob_conds_max1) # May need to do more exhuastive search? # i.e. up to.. getting all executable cases. # Calculate total probability to decide the number of where-clauses pr_sql_i = [] prob_wn_w = [] pr_wn_based_on_prob = [] for b, prob_wn1 in enumerate(prob_wn): max_executable_wn1 = len( conds_max[b] ) prob_wn_w1 = [] prob_wn_w1.append(prob_wn1[0]) # wn=0 case. 
for i_wn in range(max_executable_wn1): prob_wn_w11 = prob_wn1[i_wn+1] * prob_conds_max[b][i_wn] prob_wn_w1.append(prob_wn_w11) pr_wn_based_on_prob.append(argmax(prob_wn_w1)) prob_wn_w.append(prob_wn_w1) pr_sql_i1 = {'agg': pr_sa_best[b], 'sel': pr_sc_best[b], 'conds': conds_max[b][:pr_wn_based_on_prob[b]]} pr_sql_i.append(pr_sql_i1) # s_wv = [B, max_wn, max_nlu_tokens, 2] return prob_sca, prob_w, prob_wn_w, pr_sc_best, pr_sa_best, pr_wn_based_on_prob, pr_sql_i class SelectColumnPredict(nn.Module): def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3): super(SelectColumnPredict, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layer = num_layer self.dropout = dropout self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.W_att = nn.Linear(hidden_size, hidden_size) self.W_c = nn.Linear(hidden_size, hidden_size) self.W_header = nn.Linear(hidden_size, hidden_size) self.sc_out = nn.Sequential(nn.Tanh(), nn.Linear(2 * hidden_size, 1)) self.softmax_dim1 = nn.Softmax(dim=1) self.softmax_dim2 = nn.Softmax(dim=2) self.softmax_dim_1 = nn.Softmax(dim=-1) # emb_question, [16,26,1536] # len_question, [16] # emb_header, [102,12,1536] # len_header_token, [102] # number_header, [16] def forward(self, emb_question, len_question, emb_header, len_header_token, number_header, show_p_sc=False): # Encode encoded_question = encode(self.enc_n, emb_question, len_question, return_hidden=False, hc0=None, last_only=False) # [b, n, dim] encoded_header = encode_header(self.enc_h, emb_header, len_header_token, number_header) # [b, header, dim] bS = len(number_header) mL_n = max(len_question) # [bS, max_len_header, 100] * [bS, 100, mL_n] -> [bS, max_len_header, mL_n] att_h = torch.bmm(encoded_header, self.W_att(encoded_question).transpose(1, 2)) # Penalty on blank parts for b, l_n1 in enumerate(len_question): if l_n1 < mL_n: att_h[b, :, l_n1:] = -10000000000 p_n = self.softmax_dim2(att_h) if show_p_sc: # p = [b, header, n] if p_n.shape[0] != 1: raise Exception("Batch size should be 1.") fig=figure(2001, figsize=(12,3.5)) # subplot(6,2,7) subplot2grid((7,2), (3, 0), rowspan=2) cla() _color='rgbkcm' _symbol='.......' 
for i_h in range(number_header[0]): color_idx = i_h % len(_color) plot(p_n[0][i_h][:].data.numpy() - i_h, '--'+_symbol[color_idx]+_color[color_idx], ms=7) title('sc: p_n for each h') grid(True) fig.tight_layout() fig.canvas.draw() show() # p_n [ bS, max_len_header, mL_n] -> [ bS, max_len_header, mL_n, 1] # wenc_n [ bS, mL_n, 100] -> [ bS, 1, mL_n, 100] # -> [bS, max_len_header, mL_n, 100] -> [bS, max_len_header, 100] c_n = torch.mul(p_n.unsqueeze(3), encoded_question.unsqueeze(1)).sum(dim=2) vec = torch.cat([self.W_c(c_n), self.W_header(encoded_header)], dim=2) score_select_column = self.sc_out(vec).squeeze(2) # [bS, max_len_header, 1] -> [bS, max_len_header] score_select_column_softmax = self.softmax_dim_1(score_select_column) # Penalty max_len_header = max(number_header) for b, l_header1 in enumerate(number_header): if l_header1 < max_len_header: score_select_column[b, l_header1:] = -10000000000 for b, l_header1 in enumerate(number_header): if l_header1 < max_len_header: score_select_column_softmax[b, l_header1:] = 0 return score_select_column,score_select_column_softmax class SelectAggPredict(nn.Module): def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_agg_ops=-1, old=False): super(SelectAggPredict, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layer = num_layer self.dropout = dropout self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.W_att = nn.Linear(hidden_size, hidden_size) self.sa_out = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.Tanh(), nn.Linear(hidden_size, n_agg_ops)) # Fixed number of aggregation operator. self.softmax_dim1 = nn.Softmax(dim=1) self.softmax_dim2 = nn.Softmax(dim=2) self.softmax_dim_1 = nn.Softmax(dim=-1) if old: # for backwoard compatibility self.W_c = nn.Linear(hidden_size, hidden_size) self.W_header = nn.Linear(hidden_size, hidden_size) def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, pr_sc, show_p_sa=False): # Encode encoded_question = encode(self.enc_n, emb_question, len_question, return_hidden=False, hc0=None, last_only=False) # [b, n, dim] encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim] bS = len(l_header) mL_n = max(len_question) wenc_header_ob = encoded_header[list(range(bS)), pr_sc] # list, so one sample for each batch. 
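        # Attention of the predicted select column over the question tokens.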
# [bS, question_len, 100] * [bS, 100, 1] -> [bS, question_len] att = torch.bmm(self.W_att(encoded_question), wenc_header_ob.unsqueeze(2)).squeeze(2) # Penalty on blank parts for b, l_n1 in enumerate(len_question): if l_n1 < mL_n: att[b, l_n1:] = -10000000000 # [bS, question_len] p = self.softmax_dim1(att) if show_p_sa: if p.shape[0] != 1: raise Exception("Batch size should be 1.") fig=figure(2001); subplot(7,2,3) cla() plot(p[0].data.numpy(), '--rs', ms=7) title('sa: nlu_weight') grid(True) fig.tight_layout() fig.canvas.draw() show() # [bS, question_len, 100] * ( [bS, question_len, 1] -> [bS, question_len, 100]) # -> [bS, question_len, 100] -> [bS, 100] c_n = torch.mul(encoded_question, p.unsqueeze(2).expand_as(encoded_question)).sum(dim=1) s_sa = self.sa_out(c_n) s_sa_softmax = self.softmax_dim_1(s_sa) return s_sa,s_sa_softmax class WhereNumberPredict(nn.Module): def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, ): super(WhereNumberPredict, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layer = num_layer self.dropout = dropout self.mL_w = 4 # max where condition number self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.W_att_h = nn.Linear(hidden_size, 1) self.W_hidden = nn.Linear(hidden_size, num_layer * hidden_size) self.W_cell = nn.Linear(hidden_size, num_layer * hidden_size) self.W_att_n = nn.Linear(hidden_size, 1) self.wn_out = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.Tanh(), nn.Linear(hidden_size, self.mL_w + 1)) # max number (4 + 1) self.softmax_dim1 = nn.Softmax(dim=1) self.softmax_dim2 = nn.Softmax(dim=2) self.softmax_dim_1 = nn.Softmax(dim=-1) def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, show_p_wn=False): # Encode encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, max_len_header, dim] bS = len(l_header) max_len_question = max(len_question) max_len_header = max(l_header) # mL_h = max(len_header_token) # (self-attention?) column Embedding? # [B, max_len_header, 100] -> [B, max_len_header, 1] -> [B, max_len_header] att_h = self.W_att_h(encoded_header).squeeze(2) # Penalty for b, l_header1 in enumerate(l_header): if l_header1 < max_len_header: att_h[b, l_header1:] = -10000000000 p_h = self.softmax_dim1(att_h) if show_p_wn: if p_h.shape[0] != 1: raise Exception("Batch size should be 1.") fig=figure(2001); subplot(7,2,5) cla() plot(p_h[0].data.numpy(), '--rs', ms=7) title('wn: header_weight') grid(True) fig.canvas.draw() show() # input('Type Eenter to continue.') # [B, max_len_header, 100] * [ B, max_len_header, 1] -> [B, max_len_header, 100] -> [B, 100] c_header = torch.mul(encoded_header, p_h.unsqueeze(2)).sum(1) # [B, 100] --> [B, 2*100] Enlarge because there are two layers. hidden = self.W_hidden(c_header) # [B, 4, 200/2] hidden = hidden.view(bS, self.num_layer * 2, int( self.hidden_size / 2)) # [4, B, 100/2] # number_of_layer_layer * (bi-direction) # lstm input convention. 
hidden = hidden.transpose(0, 1).contiguous() cell = self.W_cell(c_header) # [B, 4, 100/2] cell = cell.view(bS, self.num_layer * 2, int(self.hidden_size / 2)) # [4, B, 100/2] cell = cell.transpose(0, 1).contiguous() wenc_n = encode(self.enc_n, emb_question, len_question, return_hidden=False, hc0=(hidden, cell), last_only=False) # [b, n, dim] att_n = self.W_att_n(wenc_n).squeeze(2) # [B, max_len, 100] -> [B, max_len, 1] -> [B, max_len] # Penalty for b, l_n1 in enumerate(len_question): if l_n1 < max_len_question: att_n[b, l_n1:] = -10000000000 p_n = self.softmax_dim1(att_n) if show_p_wn: if p_n.shape[0] != 1: raise Exception("Batch size should be 1.") fig=figure(2001); subplot(7,2,6) cla() plot(p_n[0].data.numpy(), '--rs', ms=7) title('wn: nlu_weight') grid(True) fig.canvas.draw() show() # input('Type Enter to continue.') # [B, mL_n, 100] *([B, mL_n] -> [B, mL_n, 1] -> [B, mL_n, 100] ) -> [B, 100] c_n = torch.mul(wenc_n, p_n.unsqueeze(2).expand_as(wenc_n)).sum(dim=1) s_wn = self.wn_out(c_n) s_wn_softmax = self.softmax_dim_1(s_wn) return s_wn,s_wn_softmax # where column predict class WhereColumnPredict(nn.Module): def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3): super(WhereColumnPredict, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layer = num_layer self.dropout = dropout self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.W_att = nn.Linear(hidden_size, hidden_size) self.W_c = nn.Linear(hidden_size, hidden_size) self.W_header = nn.Linear(hidden_size, hidden_size) self.W_out = nn.Sequential( nn.Tanh(), nn.Linear(2 * hidden_size, 1) ) self.softmax_dim1 = nn.Softmax(dim=1) self.softmax_dim2 = nn.Softmax(dim=2) self.softmax_dim_1 = nn.Softmax(dim=-1) def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, show_p_wc, penalty=True): # Encode encoded_question = encode(self.enc_n, emb_question, len_question, return_hidden=False, hc0=None, last_only=False) # [b, n, dim] encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim] # attention # wenc = [bS, mL, hidden_size] # att = [bS, max_len_header, mL_n] # att[b, i_h, j_n] = p(j_n| i_h) att = torch.bmm(encoded_header, self.W_att(encoded_question).transpose(1, 2)) # penalty to blank part. mL_n = max(len_question) for b_n, l_n1 in enumerate(len_question): if l_n1 < mL_n: att[b_n, :, l_n1:] = -10000000000 # make p(j_n | i_h) p = self.softmax_dim2(att) if show_p_wc: # p = [b, header, n] if p.shape[0] != 1: raise Exception("Batch size should be 1.") fig=figure(2001); # subplot(6,2,7) subplot2grid((7,2), (3, 1), rowspan=2) cla() _color='rgbkcm' _symbol='.......' for i_h in range(l_header[0]): color_idx = i_h % len(_color) plot(p[0][i_h][:].data.numpy() - i_h, '--'+_symbol[color_idx]+_color[color_idx], ms=7) title('wc: p_n for each h') grid(True) fig.tight_layout() fig.canvas.draw() show() # max nlu context vectors # [bS, max_len_header, mL_n]*[bS, max_len_header, mL_n] encoded_question = encoded_question.unsqueeze(1) # [ b, n, dim] -> [b, 1, n, dim] p = p.unsqueeze(3) # [b, header, n] -> [b, header, n, 1] c_n = torch.mul(encoded_question, p).sum(2) # -> [b, header, dim], c_n for each header. 
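        # Score each header column from its attended question context concatenated with its own encoding.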
y = torch.cat([self.W_c(c_n), self.W_header(encoded_header)], dim=2) # [b, header, 2*dim] score = self.W_out(y).squeeze(2) # [b, header] score[torch.isnan(score)] = 0 score_softmax = self.softmax_dim_1(score) if penalty: for b, l_header1 in enumerate(l_header): score[b, l_header1:] = -1e+10 for b, l_header1 in enumerate(l_header): score_softmax[b, l_header1:] = 0 return score,score_softmax # where op predict class WhereOpPredict(nn.Module): def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_cond_ops=3): super(WhereOpPredict, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layer = num_layer self.dropout = dropout self.mL_w = 4 # max where condition number self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.W_att = nn.Linear(hidden_size, hidden_size) self.W_c = nn.Linear(hidden_size, hidden_size) self.W_header = nn.Linear(hidden_size, hidden_size) self.wo_out = nn.Sequential( nn.Linear(2*hidden_size, hidden_size), nn.Tanh(), nn.Linear(hidden_size, n_cond_ops) ) self.softmax_dim1 = nn.Softmax(dim=1) self.softmax_dim2 = nn.Softmax(dim=2) self.softmax_dim_1 = nn.Softmax(dim=-1) def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, wn, wc, wenc_n=None, show_p_wo=False): # Encode if not wenc_n: wenc_n = encode(self.enc_n, emb_question, len_question, return_hidden=False, hc0=None, last_only=False) # [b, n, dim] encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim] bS = len(l_header) # wn wenc_header_ob = [] # observed header for b in range(bS): # [[...], [...]] # Pad list to maximum number of selections real = [encoded_header[b, col] for col in wc[b]] pad = (self.mL_w - wn[b]) * [encoded_header[b, 0]] # this padding could be wrong. Test with zero padding later. wenc_header_ob1 = torch.stack(real + pad) # It is not used in the loss function. wenc_header_ob.append(wenc_header_ob1) # list to [B, 4, dim] tensor. wenc_header_ob = torch.stack(wenc_header_ob) # list to tensor. wenc_header_ob = wenc_header_ob.to(device) # [B, 1, mL_n, dim] * [B, 4, dim, 1] # -> [B, 4, mL_n, 1] -> [B, 4, mL_n] # multiplication bewteen NLq-tokens and selected column att = torch.matmul(self.W_att(wenc_n).unsqueeze(1), wenc_header_ob.unsqueeze(3) ).squeeze(3) # Penalty for blank part. mL_n = max(len_question) for b, l_n1 in enumerate(len_question): if l_n1 < mL_n: att[b, :, l_n1:] = -10000000000 p = self.softmax_dim2(att) # p( n| selected_col ) if show_p_wo: # p = [b, header, n] if p.shape[0] != 1: raise Exception("Batch size should be 1.") fig=figure(2001) # subplot(6,2,7) subplot2grid((7,2), (5, 0), rowspan=2) cla() _color='rgbkcm' _symbol='.......' 
for i_wn in range(self.mL_w): color_idx = i_wn % len(_color) plot(p[0][i_wn][:].data.numpy() - i_wn, '--'+_symbol[color_idx]+_color[color_idx], ms=7) title('wo: p_n for selected h') grid(True) fig.tight_layout() fig.canvas.draw() show() # [B, 1, mL_n, dim] * [B, 4, mL_n, 1] # --> [B, 4, mL_n, dim] # --> [B, 4, dim] c_n = torch.mul(wenc_n.unsqueeze(1), p.unsqueeze(3)).sum(dim=2) # [bS, 5-1, dim] -> [bS, 5-1, 3] vec = torch.cat([self.W_c(c_n), self.W_header(wenc_header_ob)], dim=2) s_wo = self.wo_out(vec) s_wo_softmax = self.softmax_dim_1(s_wo) return s_wo,s_wo_softmax class WhereValuePredict_startend(nn.Module): """ Discriminative model Get start and end. Here, classifier for [ [투수], [팀1], [팀2], [연도], ...] Input: Encoded nlu & selected column. Algorithm: Encoded nlu & selected column. -> classifier -> mask scores -> ... """ def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_cond_ops=4, old=False): super(WhereValuePredict_startend, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layer = num_layer self.dropout = dropout self.n_cond_ops = n_cond_ops self.mL_w = 4 # max where condition number self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.W_att = nn.Linear(hidden_size, hidden_size) self.W_c = nn.Linear(hidden_size, hidden_size) self.W_header = nn.Linear(hidden_size, hidden_size) self.W_op = nn.Linear(n_cond_ops, hidden_size) # self.W_n = nn.Linear(hidden_size, hidden_size) if old: self.wv_out = nn.Sequential( nn.Linear(4 * hidden_size, 2) ) else: self.wv_out = nn.Sequential( nn.Linear(4 * hidden_size, hidden_size), nn.Tanh(), nn.Linear(hidden_size, 2) ) # self.wv_out = nn.Sequential( # nn.Linear(3 * hidden_size, hidden_size), # nn.Tanh(), # nn.Linear(hidden_size, self.gdkL) # ) self.softmax_dim1 = nn.Softmax(dim=1) self.softmax_dim2 = nn.Softmax(dim=2) self.softmax_dim_1 = nn.Softmax(dim=-1) def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, wn, wc, wo, wenc_n=None, show_p_wv=False): # Encode if not wenc_n: wenc_n, hout, cout = encode(self.enc_n, emb_question, len_question, return_hidden=True, hc0=None, last_only=False) # [b, n, dim] encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim] bS = len(l_header) wenc_header_ob = [] # observed header for b in range(bS): # [[...], [...]] # Pad list to maximum number of selections real = [encoded_header[b, col] for col in wc[b]] pad = (self.mL_w - wn[b]) * [encoded_header[b, 0]] # this padding could be wrong. Test with zero padding later. wenc_header_ob1 = torch.stack(real + pad) # It is not used in the loss function. wenc_header_ob.append(wenc_header_ob1) # list to [B, 4, dim] tensor. wenc_header_ob = torch.stack(wenc_header_ob) # list to tensor. wenc_header_ob = wenc_header_ob.to(device) # Column attention # [B, 1, mL_n, dim] * [B, 4, dim, 1] # -> [B, 4, mL_n, 1] -> [B, 4, mL_n] # multiplication bewteen NLq-tokens and selected column att = torch.matmul(self.W_att(wenc_n).unsqueeze(1), wenc_header_ob.unsqueeze(3) ).squeeze(3) # Penalty for blank part. 
mL_n = max(len_question) for b, l_n1 in enumerate(len_question): if l_n1 < mL_n: att[b, :, l_n1:] = -10000000000 p = self.softmax_dim2(att) # p( n| selected_col ) if show_p_wv: # p = [b, header, n] if p.shape[0] != 1: raise Exception("Batch size should be 1.") fig=figure(2001) # subplot(6,2,7) subplot2grid((7,2), (5, 1), rowspan=2) cla() _color='rgbkcm' _symbol='.......' for i_wn in range(self.mL_w): color_idx = i_wn % len(_color) plot(p[0][i_wn][:].data.numpy() - i_wn, '--'+_symbol[color_idx]+_color[color_idx], ms=7) title('wv: p_n for selected h') grid(True) fig.tight_layout() fig.canvas.draw() show() # [B, 1, mL_n, dim] * [B, 4, mL_n, 1] # --> [B, 4, mL_n, dim] # --> [B, 4, dim] c_n = torch.mul(wenc_n.unsqueeze(1), p.unsqueeze(3)).sum(dim=2) # Select observed headers only. # Also generate one_hot vector encoding info of the operator # [B, 4, dim] wenc_op = [] for b in range(bS): # [[...], [...]] # Pad list to maximum number of selections wenc_op1 = torch.zeros(self.mL_w, self.n_cond_ops) wo1 = wo[b] idx_scatter = [] l_wo1 = len(wo1) for i_wo11 in range(self.mL_w): if i_wo11 < l_wo1: wo11 = wo1[i_wo11] idx_scatter.append([int(wo11)]) else: idx_scatter.append([0]) # not used anyway wenc_op1 = wenc_op1.scatter(1, torch.tensor(idx_scatter), 1) wenc_op.append(wenc_op1) # list to [B, 4, dim] tensor. wenc_op = torch.stack(wenc_op) # list to tensor. wenc_op = wenc_op.to(device) # Now after concat, calculate logits for each token # [bS, 5-1, 3*hidden_size] = [bS, 4, 300] vec = torch.cat([self.W_c(c_n), self.W_header(wenc_header_ob), self.W_op(wenc_op)], dim=2) # Make extended vector based on encoded nl token containing column and operator information. # wenc_n = [bS, mL, 100] # vec2 = [bS, 4, mL, 400] vec1e = vec.unsqueeze(2).expand(-1,-1, mL_n, -1) # [bS, 4, 1, 300] -> [bS, 4, mL, 300] wenc_ne = wenc_n.unsqueeze(1).expand(-1, 4, -1, -1) # [bS, 1, mL, 100] -> [bS, 4, mL, 100] vec2 = torch.cat( [vec1e, wenc_ne], dim=3) # now make logits s_wv = self.wv_out(vec2) # [bS, 4, mL, 400] -> [bS, 4, mL, 2] s_wv_softmax = self.softmax_dim_1(s_wv) # penalty for spurious tokens for b, l_n1 in enumerate(len_question): if l_n1 < mL_n: s_wv[b, :, l_n1:, :] = -10000000000 for b, l_n1 in enumerate(len_question): if l_n1 < mL_n: s_wv_softmax[b, :, l_n1:, :] = 0 return s_wv,s_wv_softmax def Loss_selectwhere_startend_v2(score_select_column, s_sa, s_wn, s_wc, s_wo, s_wv, ground_truth_select_column, g_sa, g_wn, g_wc, g_wo, g_wvi): """ :param s_wv: score [ B, n_conds, T, score] :param g_wn: [ B ] :param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]] :return: """ loss = 0 # loss += Loss_sc(score_select_column, ground_truth_select_column) # loss += Loss_sa(s_sa, g_sa) # loss += Loss_wn(s_wn, g_wn) # loss += Loss_wc(s_wc, g_wc) # loss += Loss_wo(s_wo, g_wn, g_wo) # loss += Loss_wv_se(s_wv, g_wn, g_wvi) return loss def Loss_sw_se(score_select_column, s_sa, s_wn, s_wc, s_wo, s_wv, ground_truth_select_column, g_sa, g_wn, g_wc, g_wo, g_wvi): """ :param s_wv: score [ B, n_conds, T, score] :param g_wn: [ B ] :param g_wvi: [B, conds, pnt], e.g. 
[[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]] :return: """ loss = 0 loss += Loss_sc(score_select_column, ground_truth_select_column) loss += Loss_sa(s_sa, g_sa) loss += Loss_wn(s_wn, g_wn) loss += Loss_wc(s_wc, g_wc) loss += Loss_wo(s_wo, g_wn, g_wo) loss += Loss_wv_se(s_wv, g_wn, g_wvi) return loss def Loss_sc(s_sc, g_sc): loss = F.cross_entropy(s_sc, torch.tensor(g_sc).to(device)) return loss def Loss_sa(s_sa, g_sa): loss = F.cross_entropy(s_sa, torch.tensor(g_sa).to(device)) return loss def Loss_wn(s_wn, g_wn): loss = F.cross_entropy(s_wn, torch.tensor(g_wn).to(device)) return loss def Loss_wc(s_wc, g_wc): # Construct index matrix bS, max_h_len = s_wc.shape im = torch.zeros([bS, max_h_len]).to(device) for b, g_wc1 in enumerate(g_wc): for g_wc11 in g_wc1: im[b, g_wc11] = 1.0 # Construct prob. p = F.sigmoid(s_wc) loss = F.binary_cross_entropy(p, im) return loss def Loss_wo(s_wo, g_wn, g_wo): # Construct index matrix loss = 0 for b, g_wn1 in enumerate(g_wn): if g_wn1 == 0: continue g_wo1 = g_wo[b] s_wo1 = s_wo[b] loss += F.cross_entropy(s_wo1[:g_wn1], torch.tensor(g_wo1).to(device)) return loss def Loss_wv_se(s_wv, g_wn, g_wvi): """ s_wv: [bS, 4, mL, 2], 4 stands for maximum # of condition, 2 tands for start & end logits. g_wvi: [ [1, 3, 2], [4,3] ] (when B=2, wn(b=1) = 3, wn(b=2) = 2). """ loss = 0 # g_wvi = torch.tensor(g_wvi).to(device) for b, g_wvi1 in enumerate(g_wvi): # for i_wn, g_wvi11 in enumerate(g_wvi1): g_wn1 = len(g_wvi1) # 有改动 # g_wn1 = g_wn[b] # 有改动 if g_wn1 == 0: continue g_wvi1 = torch.tensor(g_wvi1)[:g_wn1].to(device) # 有改动 g_st1 = g_wvi1[:,0] g_ed1 = g_wvi1[:,1] # loss from the start position loss += F.cross_entropy(s_wv[b,:g_wn1,:,0], g_st1) # print("st_login: ", s_wv[b,:g_wn1,:,0], g_st1, loss) # loss from the end position loss += F.cross_entropy(s_wv[b,:g_wn1,:,1], g_ed1) # print("ed_login: ", s_wv[b,:g_wn1,:,1], g_ed1, loss) return loss # ========= Decoder-Layer =========== class FT_s2s_1(nn.Module): """ Decoder-Layer """ def __init__(self, input_size, hidden_size, num_layer, dropout, max_seq_length, n_cond_ops, n_agg_ops, old=False): super(FT_s2s_1, self).__init__() self.input_size = input_size # input_size self.hidden_size = hidden_size # hidden_size self.ls = num_layer self.dropout = dropout self.n_cond_ops = n_cond_ops self.n_agg_ops = n_agg_ops self.n_where_num = 4 self.decoder_s2s = Decoder_s2s(input_size, hidden_size, num_layer, dropout, max_seq_length) def forward(self, wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs=None): score = self.decoder_s2s(wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs) return score def EG_forward(self, wenc_s2s, l_input, cls_vec, pnt_start_tok, pnt_end_tok, i_sql_vocab, i_nlu, i_hds, # for EG tokens, nlu, nlu_t, hds, tt_to_t_idx, # for EG tb, engine, beam_size=4, beam_only=True): """ EG-guided beam-search """ score = self.decoder_s2s.EG_forward(wenc_s2s, l_input, cls_vec, pnt_start_tok, pnt_end_tok, i_sql_vocab, i_nlu, i_hds, # for EG tokens, nlu, nlu_t, hds, tt_to_t_idx, # for EG tb, engine, beam_size, beam_only) return score class Decoder_s2s(nn.Module): def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, max_seq_length=222, n_cond_ops=3): super(Decoder_s2s, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layer = num_layer self.dropout = dropout self.mL = max_seq_length self.Tmax = 200 self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, 
bidirectional=True) self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2), num_layers=num_layer, batch_first=True, dropout=dropout, bidirectional=True) self.decode_pn = nn.LSTM(input_size=max_seq_length, hidden_size=hidden_size, num_layers=num_layer, batch_first=True, dropout=dropout) self.W_s2s = nn.Linear(input_size, hidden_size) self.W_pnt = nn.Linear(hidden_size, hidden_size) self.wv_out = nn.Sequential(nn.Tanh(), nn.Linear(hidden_size, 1)) def forward(self, wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs=None,): # Encode bS, mL_input, input_size = wenc_s2s.shape # Now, pointer network. ipnt = wenc_s2s.new_zeros(bS, 1, mL_input).to(device) # [B, 1, 200] ipnt[:, 0, pnt_start_tok] = 1 # 27 is of start token under current tokenization scheme # initial (current) pointer cpnt = ipnt # reshape wenc_s2s to incorporate T later wenc_s2s = wenc_s2s.unsqueeze(1) # h_0 and c_0 from cls_vec # They are not bidirectional. h_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device) c_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device) for i_layer in range(self.num_layer): h_st = (2*i_layer)*self.hidden_size h_ed = h_st + self.hidden_size c_st = (2*i_layer+1)*self.hidden_size c_ed = c_st + self.hidden_size h_0[i_layer] = cls_vec[:, h_st:h_ed] # [ # of layers, batch, dim] c_0[i_layer] = cls_vec[:, c_st:c_ed] # [ # of layers, batch, dim] if g_pnt_idxs: pnt_n = torch.zeros(bS, self.Tmax, mL_input).to(device) # one hot # assign index for b, g_pnt_idxs1 in enumerate(g_pnt_idxs): for t, g_pnt_idx in enumerate(g_pnt_idxs1): pnt_n[b, t, g_pnt_idx] = 1 # Encode dec_pn, _ = self.decode_pn(pnt_n, (h_0, c_0)) dec_pn = dec_pn.contiguous() # [bS, T, input_size] dec_pn = dec_pn.unsqueeze(2) # Calculate score s_wv = self.wv_out( self.W_s2s(wenc_s2s) + self.W_pnt(dec_pn) ).squeeze(3) # [B, T, mL_input, dim] -> [B, T, mL_input, 1] -> [B, T, mL_input] # s_wv = [B, 4, T, mL_n] = [batch, conds, token idx, score] # penalty for b, l_input1 in enumerate(l_input): if l_input1 < mL_input: s_wv[b, :, l_input1:] = -10000000000 else: t = 0 s_wv_list = [] cpnt_h = (h_0, c_0) while t < self.Tmax: dec_pn, cpnt_h = self.decode_pn(cpnt, cpnt_h) # lstm # [B, 1, 100] -> [B, 1, 1, 100] dec_pn = dec_pn.unsqueeze(2) # [bS, T, input_size] # get score s_wv1 = self.wv_out( self.W_s2s(wenc_s2s) # [B, 1, mL_input, dim] + self.W_pnt(dec_pn) # [B, T=1, 1, dim] Now, T=1 ).squeeze(3) # s_wv = [B, 4, 1, mL_n, 1] = [batch, conds, token idx, score] # -> [B, 4, mL_n] # Masking -- for b, l_input1 in enumerate(l_input): if l_input1 < mL_input: s_wv1[b, :, l_input1:] = -10000000000 # Collect score-- s_wv_list.append(s_wv1) # [B, 1, mL_input] -> [B, mL_n] -> [bS*(5-1)] # (max_val, max_indices) _val, pnt_n = s_wv1.view(bS, -1).max(dim=1) # formatting pnt_n as a one-hot input. cpnt = torch.zeros(bS, mL_input).to(device) # cpnt = cpnt.scatter_(dim=1, index=pnt_n.unsqueeze(1), src=1).to(device) cpnt = cpnt.scatter_(1, pnt_n.unsqueeze(1), 1) cpnt = cpnt.unsqueeze(1) # --> [B * 4, 1, 200] t += 1 s_wv = torch.stack(s_wv_list, 1) # [B, s_wv = s_wv.squeeze(2) # # # Following lines seems to be unnecessary. 
# # Penalty to blank parts # for b, l_input1 in enumerate(l_input): # if l_input1 < mL_input: # s_wv[b, :, l_input1:] = -10000000000 return s_wv def EG_forward(self, wenc_s2s, l_input, cls_vec, pnt_start_tok, pnt_end_tok, i_sql_vocab, i_nlu, i_hds, # for EG tokens, nlu, nlu_t, hds, tt_to_t_idx, # for EG tb, engine, beam_size, beam_only=True): # Encode bS, mL_input, input_size = wenc_s2s.shape # reshape wenc_s2s to incorperate T later wenc_s2s = wenc_s2s.unsqueeze(1) # h_0 and c_0 from cls_vec # They are not bidirectional. h_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device) c_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device) for i_layer in range(self.num_layer): h_st = (2*i_layer)*self.hidden_size h_ed = h_st + self.hidden_size c_st = (2*i_layer+1)*self.hidden_size c_ed = c_st + self.hidden_size h_0[i_layer] = cls_vec[:, h_st:h_ed] # [ # of layers, batch, dim] c_0[i_layer] = cls_vec[:, c_st:c_ed] # [ # of layers, batch, dim] # initial (current) pointer pnt_list_beam = [] cpnt_beam = [] cpnt_h_beam = [] for i_beam in range(beam_size): pnt_list_beam1 = [] for b in range(bS): pnt_list_beam1.append( [ [pnt_start_tok], 0] ) pnt_list_beam.append(pnt_list_beam1) # initisl cpnt # Now, initialize pointer network. ipnt = wenc_s2s.new_zeros(bS, 1, mL_input).to(device) # [B, 1, 200] # Distort ipnt by i_bam on purpose to avoid initial duplication of beam-search ipnt[:, 0, pnt_start_tok] = 1 # 27 is of start token under current tokenization scheme cpnt_beam.append(ipnt) cpnt_h_beam.append( (h_0, c_0) ) t = 0 while t < self.Tmax: # s_wv1_beam = [] candidates = [ [] for b in range(bS) ] # [bS] # Generate beam for i_beam, cpnt in enumerate(cpnt_beam): cpnt_h = cpnt_h_beam[i_beam] pnt_list_beam1 = pnt_list_beam[i_beam] dec_pn, cpnt_h = self.decode_pn(cpnt, cpnt_h) # lstm cpnt_h_beam[i_beam] = cpnt_h # [B, 1, 100] -> [B, 1, 1, 100] dec_pn = dec_pn.unsqueeze(2) # [bS, T, input_size] # get score s_wv1 = self.wv_out( self.W_s2s(wenc_s2s) # [B, 1, mL_input, dim] + self.W_pnt(dec_pn) # [B, T=1, 1, dim] Now, T=1 ).squeeze(3) # s_wv = [B, 4, 1, mL_n, 1] = [batch, conds, token idx, score] # -> [B, 4, mL_n] # Masking -- for b, l_input1 in enumerate(l_input): if l_input1 < mL_input: s_wv1[b, :, l_input1:] = -10000000000 # Get the candidates only among the input space. prob, idxs = F.softmax(s_wv1.view(bS, -1), dim=1).topk(dim=1, k=max(l_input)) log_prob = torch.log(prob) # [bS, beam_size] for b, log_prob1 in enumerate(log_prob): pnt_list11, score = pnt_list_beam1[b] for i_can, log_prob11 in enumerate(log_prob1): # no update if last token was the end-token previous_pnt = pnt_list11[-1] if previous_pnt== pnt_end_tok: new_seq = pnt_list11 new_score = score else: new_seq = pnt_list11 + [idxs[b][i_can].item()] new_score = score + log_prob11.item() _candidate = [new_seq, new_score] candidates[b].append(_candidate) # Execution-guided beam filtering for b, candidates1 in enumerate(candidates): new_pnt_list_batch1 = sorted(candidates1, key=lambda list1: list1[-1], reverse=True) count = 0 selected_candidates1 = [] for new_pnt_list_batch11 in new_pnt_list_batch1: if new_pnt_list_batch11 not in selected_candidates1: if beam_only: selected_candidates1.append(new_pnt_list_batch11) pnt_list_beam[count][b] = new_pnt_list_batch11 count +=1 else: # Need to be modified here. 
executable = False testable = False pr_i_vg_list, pr_i_vg_sub_list = gen_i_vg_from_pnt_idxs([new_pnt_list_batch11[0]], [i_sql_vocab[b]], [i_nlu[b]], [i_hds[b]]) pr_sql_q_s2s, pr_sql_i = gen_sql_q_from_i_vg([tokens[b]], [nlu[b]], [nlu_t[b]], [hds[b]], [tt_to_t_idx[b]], pnt_start_tok, pnt_end_tok, [new_pnt_list_batch11[0]], pr_i_vg_list, pr_i_vg_sub_list) # check testability from select-clause try: # check whether basic elements presents in pr_sql_i # If so, it is testable. idx_agg = pr_sql_i[0]["agg"] idx_sel = pr_sql_i[0]["sel"] testable = True except: testable = False pass # check the presence of conds if testable: try: conds = pr_sql_i[0]["conds"] except: conds = [] try: pr_ans1 = engine.execute(tb[b]['id'], idx_sel, idx_agg, conds) executable = bool(pr_ans1) except: executable = False # if testable: if executable: add_candidate = True else: add_candidate = False else: add_candidate = True if add_candidate: selected_candidates1.append(new_pnt_list_batch11) pnt_list_beam[count][b] = new_pnt_list_batch11 count += 1 if count == beam_size: break if count < beam_size: # not executable at all.. # add junk sequence. for i_junk in range(count, beam_size): pnt_list_beam[i_junk][b] = [[pnt_end_tok],-9999999] # generate cpnt # formatting pnt_n as a one-hot input. for i_beam in range(beam_size): cpnt = torch.zeros(bS, mL_input).to(device) # cpnt = cpnt.scatter_(dim=1, index=pnt_n.unsqueeze(1), src=1).to(device) idx_batch = [seq_score[0][-1] for seq_score in pnt_list_beam[i_beam]] pnt_n = torch.tensor(idx_batch).to(device) cpnt = cpnt.scatter_(1, pnt_n.unsqueeze(1), 1) cpnt = cpnt.unsqueeze(1) # --> [B, t=1, mL_input] cpnt_beam[i_beam] = cpnt t += 1 # Generate best pr_pnt_list, p_tot pr_pnt_idxs = [] p_list = [] for b in range(bS): pnt_list_beam_best = pnt_list_beam[0] pr_pnt_idxs.append(pnt_list_beam_best[b][0]) p_list.append( pnt_list_beam_best[b][1]) return pr_pnt_idxs, p_list, pnt_list_beam # ============= Shallow-Layer =============== class FT_Scalar_1(nn.Module): """ Shallow-Layer """ def __init__(self, input_size, hidden_size, num_layer, dropout, n_cond_ops, n_agg_ops, old=False): super(FT_Scalar_1, self).__init__() self.input_size = input_size # input_size self.hidden_size = hidden_size self.num_layer = num_layer self.dropout = dropout self.n_cond_ops = n_cond_ops self.n_agg_ops = n_agg_ops self.n_where_num = 4 def scp(self, wemb_h, l_header): bS, max_header_len, _ = wemb_h.shape # s_sc s_sc = torch.zeros(bS, max_header_len).to(device) s_sc[:, :] = wemb_h[:, :, 0] # s_sc = [B, max_header length, 1] # s_sc[:,:] = F.tanh(wemb_h[:,:,0]) # s_sc = [B, max_header length, 1] # s_sc = s_sc.squeeze(2) # masking # print(f"s_sc {s_sc}") for b, l_header1 in enumerate(l_header): s_sc[b, l_header1:] = -9999999999.0 return s_sc def sap(self, wemb_h, pr_sc, idx_st, idx_ed): bS, max_header_len, _ = wemb_h.shape # select of aggregation operator s_sa = torch.zeros([bS, self.n_agg_ops]).to(device) for b, pr_sc1 in enumerate(pr_sc): s_sa[b,:] = wemb_h[b,pr_sc1,idx_st:idx_ed] return s_sa def wnp(self, cls_vec): bS = cls_vec.shape[0] # [B,hidden_size] -> [B, n_where_num+1] s_wn = torch.zeros(bS, (self.n_where_num + 1)).to(device) s_wn[:, :] = cls_vec[:, 0:(self.n_where_num + 1)] return s_wn def wcp(self, wemb_h, l_header, idx_st, idx_ed): bS, max_header_len, _ = wemb_h.shape s_wc = torch.zeros(bS, max_header_len, 1).to(device) s_wc[:, :, :] = wemb_h[:, :, idx_st:idx_ed] s_wc = s_wc.squeeze(2) # [B, max_header_length] # masking for b, l_header1 in enumerate(l_header): s_wc[b, l_header1:] = -99999999999.0 return s_wc 
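    # Operator logits for each selected where-column are read from a fixed slice (idx_st:idx_ed) of that column's embedding.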
def wop(self, wemb_h, pr_wc, idx_st, idx_ed): bS, max_header_len, _ = wemb_h.shape s_wo = torch.zeros([bS, self.n_where_num, self.n_cond_ops]).to(device) for b, pr_wc1 in enumerate(pr_wc): if len(pr_wc1) > 0: s_wo[b, 0:len(pr_wc1), :] = wemb_h[b, pr_wc1, idx_st:idx_ed] else: pass return s_wo def wvp(self, emb_question, len_question, pr_wc): bS, _, _ = emb_question.shape s_wv = torch.zeros([bS, self.n_where_num, max(len_question), 2]).to(device) for b, pr_wc1 in enumerate(pr_wc): if len(pr_wc1) > 0: # start logit s_wv[b, 0:len(pr_wc1), :, 0] = emb_question[b, :, pr_wc1].transpose(0, 1) # end logit s_wv[b, 0:len(pr_wc1), :, 1] = emb_question[b, :, [pr_wc11 + 100 for pr_wc11 in pr_wc1]].transpose(0, 1) else: pass # masking # penalty for spurious tokens for b, l_n1 in enumerate(len_question): if l_n1 < max(len_question): s_wv[b, :, l_n1:, :] = -1e+11 return s_wv def forward(self, emb_question, len_question, wemb_h, l_header, cls_vec, g_sc=None, g_sa=None, g_wn=None, g_wc=None, g_wo=None, g_wvi=None, show_p_sc=False, show_p_sa=False, show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False): # emb_question = [B, max_nlu_token_length, hidden_size] # here, # of target_layer is fixed to 1. # wemb_h = [B, max_header #, hidden_size] s_sc = self.scp(wemb_h, l_header) if g_sc: pr_sc = g_sc else: pr_sc = pred_sc(s_sc) # s_sa idx_st = 1 idx_ed = 1 + self.n_agg_ops s_sa = self.sap(wemb_h, pr_sc, idx_st, idx_ed) if g_sa: pr_sa = g_sa else: pr_sa = pred_sa(s_sa) # where_number s_wn = self.wnp(cls_vec) if g_wn: pr_wn = g_wn else: pr_wn = pred_wn(s_wn) # wc idx_st = idx_ed+1 idx_ed = idx_st+1 s_wc = self.wcp(wemb_h, l_header, idx_st, idx_ed) if g_wc: pr_wc = g_wc else: pr_wc = pred_wherecolumn(pr_wn, s_wc) # wo idx_st = idx_ed+1 idx_ed = idx_st + self.n_cond_ops s_wo = self.wop(wemb_h, pr_wc, idx_st, idx_ed) if g_wo: pr_wo = g_wo else: pr_wo = pred_wo(pr_wn, s_wo) # wv # s_wv = [bS, 4, mL, 2] s_wv = self.wvp(emb_question, len_question, pr_wc) # print(s_wv) # s_wv = F.tanh(s_wv) return s_sc, s_sa, s_wn, s_wc, s_wo, s_wv def forward_EG(self, emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb, nlu_t, nlu_tt, tt_to_t_idx, nlu, beam_size=4): """ Execution-guided beam decoding. Essentially identical with that of NL2SQL Layer. """ # Select-clause prob_sca, pr_sc_best, pr_sa_best, \ p_sc_best, p_sa_best, p_select \ = self.EG_decoding_select(wemb_h, l_header, tb, beam_size=beam_size) # Where-clause prob_w, prob_wn_w, pr_wn_based_on_prob, pr_sql_i, pr_wvi_best, \ p_where, p_wn_best, p_wc_best, p_wo_best, p_wvi_best \ = self.EG_decoding_where(emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb, nlu_t, nlu_tt, tt_to_t_idx, nlu, pr_sc_best, pr_sa_best, beam_size=4) p_tot = cal_prob_tot(p_select, p_where) return pr_sc_best, pr_sa_best, pr_wn_based_on_prob, pr_wvi_best, \ pr_sql_i, p_tot, p_select, p_where, p_sc_best, p_sa_best, \ p_wn_best, p_wc_best, p_wo_best, p_wvi_best def EG_decoding_select(self, wemb_h, l_header, tb, beam_size=4, show_p_sc=False, show_p_sa=False): # sc s_sc = self.scp(wemb_h, l_header) prob_sc = F.softmax(s_sc, dim=-1) bS, mcL = s_sc.shape # minimum_header_length = min(l_header) # beam_size = minimum_header_length if beam_size > minimum_header_length else beam_size # sa # Construct all possible sc_sa_score prob_sc_sa = torch.zeros([bS, beam_size, self.n_agg_ops]).to(device) score_sc_sa = torch.zeros([bS, beam_size, self.n_agg_ops]).to(device) prob_sca = torch.zeros_like(prob_sc_sa).to(device) # get the top-k indices. 
pr_sc_beam = [B, beam_size] pr_sc_beam = pred_sc_beam(s_sc, beam_size) # calculate and predict s_sa. idx_st = 1 idx_ed = 1 + self.n_agg_ops for i_beam in range(beam_size): pr_sc = list(array(pr_sc_beam)[:, i_beam]) s_sa = self.sap(wemb_h, pr_sc, idx_st, idx_ed) prob_sa = F.softmax(s_sa, dim=-1) prob_sc_sa[:, i_beam, :] = prob_sa score_sc_sa[:, i_beam, :] = s_sa prob_sc_selected = prob_sc[range(bS), pr_sc] # [B] prob_sca[:, i_beam, :] = (prob_sa.t() * prob_sc_selected).t() # [mcL, B] * [B] -> [mcL, B] (element-wise multiplication) # [mcL, B] -> [B, mcL] # Calculate the dimension of tensor # tot_dim = len(prob_sca.shape) idxs = topk_multi_dim(torch.tensor(prob_sca), n_topk=beam_size, batch_exist=True) # Now as sc_idx is already sorted, re-map them properly. idxs = remap_sc_idx(idxs, pr_sc_beam) # [sc_beam_idx, sa_idx] -> [sc_idx, sa_idx] idxs_arr = array(idxs) # [B, beam_size, remainig dim] # idxs[b][0] gives first probable [sc_idx, sa_idx] pairs. # idxs[b][1] gives of second. # Calculate prob_sca, a joint probability beam_idx_sca = [0] * bS beam_meet_the_final = [False] * bS while True: pr_sc = idxs_arr[range(bS), beam_idx_sca, 0] pr_sa = idxs_arr[range(bS), beam_idx_sca, 1] # map index properly check = check_sc_sa_pairs(tb, pr_sc, pr_sa) if sum(check) == bS: break else: for b, check1 in enumerate(check): if not check1: # wrong pair beam_idx_sca[b] += 1 if beam_idx_sca[b] >= beam_size: beam_meet_the_final[b] = True beam_idx_sca[b] -= 1 else: beam_meet_the_final[b] = True if sum(beam_meet_the_final) == bS: break # Now pr_sc, pr_sa are properly predicted. pr_sc_best = list(pr_sc) pr_sa_best = list(pr_sa) # output for later analysis. p_sc_best = cal_prob_sc(s_sc, pr_sc_best) p_sa_best = cal_prob_sa(score_sc_sa[range(bS), beam_idx_sca, :].squeeze(1), pr_sa_best) p_select = cal_prob_select(p_sc_best, p_sa_best) # p_select = prob_sca[range(bS),beam_idx_sca,pr_sa_best].detach().to('cpu').numpy() return prob_sca, pr_sc_best, pr_sa_best, p_sc_best, p_sa_best, p_select def EG_decoding_where(self, emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb, nlu_t, nlu_wp_t, tt_to_t_idx, nlu, pr_sc_best, pr_sa_best, beam_size=4, show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False): bS, max_header_len, _ = wemb_h.shape # Now, Where-clause beam search. idx_st = 1 idx_ed = 1 + self.n_agg_ops s_wn = self.wnp(cls_vec) prob_wn = F.softmax(s_wn, dim=-1).detach().to('cpu').numpy() # Found "executable" most likely 4(=max_num_of_conditions) where-clauses. # wc idx_st = idx_ed + 1 idx_ed = idx_st + 1 s_wc = self.wcp(wemb_h, l_header, idx_st, idx_ed) prob_wc = torch.sigmoid(s_wc).detach().to('cpu').numpy() # pr_wc_sorted_by_prob = pred_wc_sorted_by_prob(s_wc) # get max_wn # of most probable columns & their prob. pr_wn_max = [self.n_where_num] * bS pr_wc_max = pred_wherecolumn(pr_wn_max, s_wc) # if some column do not have executable where-claouse, omit that column prob_wc_max = zeros([bS, self.n_where_num]) for b, pr_wc_max1 in enumerate(pr_wc_max): prob_wc_max[b, :] = prob_wc[b, pr_wc_max1] # get most probable n_where_num where-clouses # wo idx_st = idx_ed + 1 idx_ed = idx_st + self.n_cond_ops s_wo_max = self.wop(wemb_h, pr_wc_max, idx_st, idx_ed) prob_wo_max = F.softmax(s_wo_max, dim=-1).detach().to('cpu').numpy() # [B, n_where_num, n_cond_op] pr_wvi_beam_op_list = [] prob_wvi_beam_op_list = [] prob_wvi_beam_st_op_list = [] prob_wvi_beam_ed_op_list = [] # To re-use code, repeat the calculation unnecessarily. 
for i_op in range(self.n_cond_ops - 1): pr_wo_temp = [[i_op] * self.n_where_num] * bS # wv s_wv = self.wvp(emb_question, len_question, pr_wc_max) prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy() # prob_wv pr_wvi_beam, prob_wvi_beam, prob_wvi_beam_st, prob_wvi_beam_ed = pred_wvi_se_beam(self.n_where_num, s_wv, beam_size) pr_wvi_beam_op_list.append(pr_wvi_beam) prob_wvi_beam_op_list.append(prob_wvi_beam) prob_wvi_beam_st_op_list.append(prob_wvi_beam_st) prob_wvi_beam_ed_op_list.append(prob_wvi_beam_ed) # pr_wvi_beam = [B, n_where_num, k_logit**2 [st, ed] paris] # pred_wv_beam # Calculate joint probability of where-clause # prob_w = [batch, wc, wo, wv] = [B, n_where_num, n_cond_op, n_pairs] n_wv_beam_pairs = prob_wvi_beam.shape[2] prob_w = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs]) prob_wc_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs]) prob_wo_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs]) prob_wvi_st_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs]) prob_wvi_ed_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs]) for b in range(bS): for i_wn in range(self.n_where_num): for i_op in range(self.n_cond_ops - 1): # do not use final one p_wc = prob_wc_max[b, i_wn] for i_wv_beam in range(n_wv_beam_pairs): # i_wc = pr_wc_max[b][i_wn] # already done p_wo = prob_wo_max[b, i_wn, i_op] p_wv = prob_wvi_beam_op_list[i_op][b, i_wn, i_wv_beam] prob_w[b, i_wn, i_op, i_wv_beam] = p_wc * p_wo * p_wv prob_wc_dupl[b, i_wn, i_op, i_wv_beam] = p_wc prob_wo_dupl[b, i_wn, i_op, i_wv_beam] = p_wo p_wv_st = prob_wvi_beam_st_op_list[i_op][b, i_wn, i_wv_beam] p_wv_ed = prob_wvi_beam_ed_op_list[i_op][b, i_wn, i_wv_beam] prob_wvi_st_dupl[b, i_wn, i_op, i_wv_beam] = p_wv_st prob_wvi_ed_dupl[b, i_wn, i_op, i_wv_beam] = p_wv_ed # Perform execution guided decoding conds_max = [] prob_conds_max = [] # while len(conds_max) < self.n_where_num: idxs = topk_multi_dim(torch.tensor(prob_w), n_topk=beam_size, batch_exist=True) # idxs = [B, i_wc_beam, i_op, i_wv_pairs] # Construct conds1. Collect only executable one. It is descending order of the probability. pr_wvi_max = [] p_wc_max = [] p_wo_max = [] p_wvi_max = [] for b, idxs1 in enumerate(idxs): conds_max1 = [] prob_conds_max1 = [] pr_wvi1_max = [] p_wc1_max = [] p_wo1_max = [] p_wvi1_max = [] for i_wn, idxs11 in enumerate(idxs1): i_wc = pr_wc_max[b][idxs11[0]] i_op = idxs11[1] wvi = pr_wvi_beam_op_list[i_op][b][idxs11[0]][idxs11[2]] # idx11[0] # get wv_str temp_pr_wv_str, _ = convert_pred_wvi_to_string([[wvi]], [nlu_t[b]], [nlu_wp_t[b]], [tt_to_t_idx[b]], [nlu[b]]) merged_wv11 = merge_wv_t1_eng(temp_pr_wv_str[0][0], nlu[b]) conds11 = [i_wc, i_op, merged_wv11] prob_conds11 = prob_w[b, idxs11[0], idxs11[1], idxs11[2]] p_wc11_max = prob_wc_dupl[b, idxs11[0], idxs11[1], idxs11[2]] p_wo11_max = prob_wo_dupl[b, idxs11[0], idxs11[1], idxs11[2]] p_wvi11_max = [ prob_wvi_st_dupl[b, idxs11[0], idxs11[1], idxs11[2]], prob_wvi_ed_dupl[b, idxs11[0], idxs11[1], idxs11[2]] ] # test execution # print(nlu[b]) # print(tb[b]['id'], tb[b]['types'], pr_sc[b], pr_sa[b], [conds11]) pr_ans = engine.execute(tb[b]['id'], pr_sc_best[b], pr_sa_best[b], [conds11]) if bool(pr_ans): # pr_ans is not empty! 
conds_max1.append(conds11) prob_conds_max1.append(prob_conds11) pr_wvi1_max.append(wvi) p_wc1_max.append(p_wc11_max) p_wo1_max.append(p_wo11_max) p_wvi1_max.append(p_wvi11_max) conds_max.append(conds_max1) prob_conds_max.append(prob_conds_max1) pr_wvi_max.append(pr_wvi1_max) p_wc_max.append(p_wc1_max) p_wo_max.append(p_wo1_max) p_wvi_max.append(p_wvi1_max) # May need to do more exhuastive search? # i.e. up to.. getting all executable cases. # Calculate total probability to decide the number of where-clauses pr_sql_i = [] prob_wn_w = [] # total where-clause probability pr_wn_based_on_prob = [] pr_wvi_best = [] p_wc = [] p_wo = [] p_wvi = [] for b, prob_wn1 in enumerate(prob_wn): max_executable_wn1 = len(conds_max[b]) prob_wn_w1 = [] prob_wn_w1.append(prob_wn1[0]) # wn=0 case. for i_wn in range(max_executable_wn1): prob_wn_w11 = prob_wn1[i_wn + 1] * prob_conds_max[b][i_wn] prob_wn_w1.append(prob_wn_w11) pr_wn_based_on_prob.append(argmax(prob_wn_w1)) prob_wn_w.append(prob_wn_w1) pr_sql_i1 = {'agg': pr_sa_best[b], 'sel': pr_sc_best[b], 'conds': conds_max[b][:pr_wn_based_on_prob[b]]} pr_wvi_best1 = pr_wvi_max[b][:pr_wn_based_on_prob[b]] pr_sql_i.append(pr_sql_i1) pr_wvi_best.append(pr_wvi_best1) p_wc.append( p_wc_max[b][:pr_wn_based_on_prob[b]] ) p_wo.append( p_wo_max[b][:pr_wn_based_on_prob[b]] ) p_wvi.append( p_wvi_max[b][:pr_wn_based_on_prob[b]] ) # s_wv = [B, n_where_num, max_nlu_tokens, 2] p_wn = cal_prob_wn(s_wn, pr_wn_based_on_prob) p_where = cal_prob_where(p_wn, p_wc, p_wo, p_wvi) return prob_w, prob_wn_w, pr_wn_based_on_prob, pr_sql_i, pr_wvi_best, \ p_where, p_wn, p_wc, p_wo, p_wvi def Loss_s2s(score, g_pnt_idxs): """ score = [B, T, max_seq_length] """ # WHERE string part loss = 0 for b, g_pnt_idxs1 in enumerate(g_pnt_idxs): ed = len(g_pnt_idxs1) - 1 score_part = score[b, :ed] loss += F.cross_entropy(score_part, torch.tensor(g_pnt_idxs1[1:]).to(device)) # +1 shift. return loss
2.0625
2
www/app.py
leeeGreat/xlw_study_python
1
121
<filename>www/app.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = '<NAME>' ''' async web application. ''' import logging; logging.basicConfig(level=logging.INFO) import asyncio, os, json, time from datetime import datetime from aiohttp import web def index(request): return web.Response(body=b'<h1>Awesome</h1>') async def init(loop): app = web.Application(loop=loop) app.router.add_route('GET', '/', index) srv = await loop.create_server(app.make_handler(), '127.0.0.1', 9000) logging.info('server started at http://127.0.0.1:9000...') return srv loop = asyncio.get_event_loop() loop.run_until_complete(init(loop)) loop.run_forever()
2.578125
3
examples/Testing/flopy3_plotdata.py
ritchie46/flopy
1
122
<reponame>ritchie46/flopy from __future__ import print_function import os import numpy as np import matplotlib.pyplot as plt import flopy fb = flopy.modflow.Modflow.load('freyberg', version='mf2005', model_ws=os.path.join('..', 'data', 'freyberg'), verbose=True) dis = fb.dis top = fb.dis.top fb.dis.top.plot(grid=True, colorbar=True) fb.dis.botm.plot(grid=True, colorbar=True) fb.dis.plot() plt.show() fb.dis.plot() plt.show() fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(1,2,1, aspect='equal') fb.dis.top.plot(grid=True, axes=ax, colorbar=True) ax = fig.add_subplot(1,2,2, aspect='equal') fb.dis.botm.plot(grid=True, axes=ax, colorbar=True) plt.show() print('this is the end my friend')
2.5625
3
chia/components/sample_transformers/__init__.py
cabrust/chia
0
123
<reponame>cabrust/chia from chia import components from chia.components.sample_transformers import identity from chia.components.sample_transformers.sample_transformer import SampleTransformer class SampleTransformerFactory(components.Factory): name_to_class_mapping = {"identity": identity.IdentitySampleTransformer} __all__ = ["SampleTransformer", "SampleTransformerFactory"]
2.015625
2
3/3.6/add_guest.py
singi2016cn/python-scaffold
0
124
<reponame>singi2016cn/python-scaffold
# Add guests
names = []
names.append('singi')
names.append('lily')
names.append('sam')

print('I find a big dining-table,I can invite more friends.')
names.insert(0, 'xiaoling')
names.insert(2, 'fangsi')
names.append('zhangqing')

greets = ',would you like to have dinner with me ?'
print(names[0]+greets)
print(names[1]+greets)
print(names[2]+greets)
print(names[3]+greets)
print(names[4]+greets)
print(names[5]+greets)
3.234375
3
apps/pypi/tests/test_slurper.py
cartwheelweb/packaginator
1
125
<filename>apps/pypi/tests/test_slurper.py
from django.template.defaultfilters import slugify

from django.test import TestCase

from package.models import Package, Version

from pypi.slurper import Slurper

TEST_PACKAGE_NAME = 'Django'
TEST_PACKAGE_VERSION = '1.3'
TEST_PACKAGE_REPO_NAME = 'django-uni-form'


class SlurpAllTests(TestCase):

    def test_get_latest_version_number(self):
        slurper = Slurper(TEST_PACKAGE_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_NAME)
        self.assertEquals(version, TEST_PACKAGE_VERSION)

    def test_get_or_create_package(self):
        slurper = Slurper(TEST_PACKAGE_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_NAME)
        package, created = slurper.get_or_create_package(TEST_PACKAGE_NAME, version)
        self.assertTrue(created)
        self.assertTrue(isinstance(package, Package))
        self.assertEquals(package.title, TEST_PACKAGE_NAME)
        self.assertEquals(package.slug, slugify(TEST_PACKAGE_NAME))

    def test_get_or_create_with_repo(self):
        slurper = Slurper(TEST_PACKAGE_REPO_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_REPO_NAME)
        package, created = slurper.get_or_create_package(TEST_PACKAGE_REPO_NAME, version)
        self.assertTrue(created)
        self.assertTrue(isinstance(package, Package))
        self.assertEquals(package.title, TEST_PACKAGE_REPO_NAME)
        self.assertEquals(package.slug, slugify(TEST_PACKAGE_REPO_NAME))

    def test_check_versions(self):
        slurper = Slurper(TEST_PACKAGE_REPO_NAME)
        version = slurper.get_latest_version_number(TEST_PACKAGE_REPO_NAME)

        # make me a package (Actually, make me a billionaire)
        slurper.get_or_create_package(TEST_PACKAGE_REPO_NAME, version)

        # fetch the package for testing
        package = Package.objects.get(title=TEST_PACKAGE_REPO_NAME)

        self.assertTrue(package.pypi_downloads > 1000)
2.34375
2
azure-mgmt-logic/azure/mgmt/logic/models/recurrence_schedule_occurrence.py
azuresdkci1x/azure-sdk-for-python-1722
1
126
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class RecurrenceScheduleOccurrence(Model): """RecurrenceScheduleOccurrence. :param day: The day of the week. Possible values include: 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday' :type day: str or :class:`DayOfWeek <azure.mgmt.logic.models.DayOfWeek>` :param occurrence: The occurrence. :type occurrence: int """ _attribute_map = { 'day': {'key': 'day', 'type': 'DayOfWeek'}, 'occurrence': {'key': 'occurrence', 'type': 'int'}, } def __init__(self, day=None, occurrence=None): self.day = day self.occurrence = occurrence
2.171875
2
pandas/core/apply.py
AakankshaAshok/pandas
0
127
import inspect

import numpy as np

from pandas._libs import reduction as libreduction
from pandas.util._decorators import cache_readonly

from pandas.core.dtypes.common import (
    is_dict_like,
    is_extension_array_dtype,
    is_list_like,
    is_sequence,
)
from pandas.core.dtypes.generic import ABCSeries


def frame_apply(
    obj,
    func,
    axis=0,
    raw=False,
    result_type=None,
    ignore_failures=False,
    args=None,
    kwds=None,
):
    """ construct and return a row or column based frame apply object """

    axis = obj._get_axis_number(axis)
    if axis == 0:
        klass = FrameRowApply
    elif axis == 1:
        klass = FrameColumnApply

    return klass(
        obj,
        func,
        raw=raw,
        result_type=result_type,
        ignore_failures=ignore_failures,
        args=args,
        kwds=kwds,
    )


class FrameApply:
    def __init__(self, obj, func, raw, result_type, ignore_failures, args, kwds):
        self.obj = obj
        self.raw = raw
        self.ignore_failures = ignore_failures
        self.args = args or ()
        self.kwds = kwds or {}

        if result_type not in [None, "reduce", "broadcast", "expand"]:
            raise ValueError(
                "invalid value for result_type, must be one "
                "of {None, 'reduce', 'broadcast', 'expand'}"
            )

        self.result_type = result_type

        # curry if needed
        if (kwds or args) and not isinstance(func, (np.ufunc, str)):

            def f(x):
                return func(x, *args, **kwds)

        else:
            f = func

        self.f = f

        # results
        self.result = None
        self.res_index = None
        self.res_columns = None

    @property
    def columns(self):
        return self.obj.columns

    @property
    def index(self):
        return self.obj.index

    @cache_readonly
    def values(self):
        return self.obj.values

    @cache_readonly
    def dtypes(self):
        return self.obj.dtypes

    @property
    def agg_axis(self):
        return self.obj._get_agg_axis(self.axis)

    def get_result(self):
        """ compute the results """
        # dispatch to agg
        if is_list_like(self.f) or is_dict_like(self.f):
            return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds)

        # all empty
        if len(self.columns) == 0 and len(self.index) == 0:
            return self.apply_empty_result()

        # string dispatch
        if isinstance(self.f, str):
            # Support for `frame.transform('method')`
            # Some methods (shift, etc.) require the axis argument, others
            # don't, so inspect and insert if necessary.
            func = getattr(self.obj, self.f)
            sig = inspect.getfullargspec(func)
            if "axis" in sig.args:
                self.kwds["axis"] = self.axis
            return func(*self.args, **self.kwds)

        # ufunc
        elif isinstance(self.f, np.ufunc):
            with np.errstate(all="ignore"):
                results = self.obj._data.apply("apply", func=self.f)
            return self.obj._constructor(
                data=results, index=self.index, columns=self.columns, copy=False
            )

        # broadcasting
        if self.result_type == "broadcast":
            return self.apply_broadcast()

        # one axis empty
        elif not all(self.obj.shape):
            return self.apply_empty_result()

        # raw
        elif self.raw and not self.obj._is_mixed_type:
            return self.apply_raw()

        return self.apply_standard()

    def apply_empty_result(self):
        """
        we have an empty result; at least 1 axis is 0

        we will try to apply the function to an empty
        series in order to see if this is a reduction function
        """

        # we are not asked to reduce or infer reduction
        # so just return a copy of the existing object
        if self.result_type not in ["reduce", None]:
            return self.obj.copy()

        # we may need to infer
        should_reduce = self.result_type == "reduce"

        from pandas import Series

        if not should_reduce:
            try:
                r = self.f(Series([]))
            except Exception:
                pass
            else:
                should_reduce = not isinstance(r, Series)

        if should_reduce:
            if len(self.agg_axis):
                r = self.f(Series([]))
            else:
                r = np.nan

            return self.obj._constructor_sliced(r, index=self.agg_axis)
        else:
            return self.obj.copy()

    def apply_raw(self):
        """ apply to the values as a numpy array """
        try:
            result = libreduction.compute_reduction(self.values, self.f, axis=self.axis)
        except ValueError as err:
            if "Function does not reduce" not in str(err):
                # catch only ValueError raised intentionally in libreduction
                raise
            result = np.apply_along_axis(self.f, self.axis, self.values)

        # TODO: mixed type case
        if result.ndim == 2:
            return self.obj._constructor(result, index=self.index, columns=self.columns)
        else:
            return self.obj._constructor_sliced(result, index=self.agg_axis)

    def apply_broadcast(self, target):
        result_values = np.empty_like(target.values)

        # axis which we want to compare compliance
        result_compare = target.shape[0]

        for i, col in enumerate(target.columns):
            res = self.f(target[col])
            ares = np.asarray(res).ndim

            # must be a scalar or 1d
            if ares > 1:
                raise ValueError("too many dims to broadcast")
            elif ares == 1:

                # must match return dim
                if result_compare != len(res):
                    raise ValueError("cannot broadcast result")

            result_values[:, i] = res

        # we *always* preserve the original index / columns
        result = self.obj._constructor(
            result_values, index=target.index, columns=target.columns
        )
        return result

    def apply_standard(self):

        # try to reduce first (by default)
        # this only matters if the reduction in values is of different dtype
        # e.g. if we want to apply to a SparseFrame, then can't directly reduce

        # we cannot reduce using non-numpy dtypes,
        # as demonstrated in gh-12244
        if (
            self.result_type in ["reduce", None]
            and not self.dtypes.apply(is_extension_array_dtype).any()
            # Disallow complex_internals since libreduction shortcut
            # cannot handle MultiIndex
            and not self.agg_axis._has_complex_internals
        ):

            values = self.values
            index = self.obj._get_axis(self.axis)
            labels = self.agg_axis
            empty_arr = np.empty(len(index), dtype=values.dtype)

            # Preserve subclass for e.g. test_subclassed_apply
            dummy = self.obj._constructor_sliced(
                empty_arr, index=index, dtype=values.dtype
            )

            try:
                result = libreduction.compute_reduction(
                    values, self.f, axis=self.axis, dummy=dummy, labels=labels
                )
            except ValueError as err:
                if "Function does not reduce" not in str(err):
                    # catch only ValueError raised intentionally in libreduction
                    raise
            except TypeError:
                # e.g. test_apply_ignore_failures we just ignore
                if not self.ignore_failures:
                    raise
            except ZeroDivisionError:
                # reached via numexpr; fall back to python implementation
                pass
            else:
                return self.obj._constructor_sliced(result, index=labels)

        # compute the result using the series generator
        self.apply_series_generator()

        # wrap results
        return self.wrap_results()

    def apply_series_generator(self):
        series_gen = self.series_generator
        res_index = self.result_index

        i = None
        keys = []
        results = {}
        if self.ignore_failures:
            successes = []
            for i, v in enumerate(series_gen):
                try:
                    results[i] = self.f(v)
                except Exception:
                    pass
                else:
                    keys.append(v.name)
                    successes.append(i)

            # so will work with MultiIndex
            if len(successes) < len(res_index):
                res_index = res_index.take(successes)

        else:
            for i, v in enumerate(series_gen):
                results[i] = self.f(v)
                keys.append(v.name)

        self.results = results
        self.res_index = res_index
        self.res_columns = self.result_columns

    def wrap_results(self):
        results = self.results

        # see if we can infer the results
        if len(results) > 0 and 0 in results and is_sequence(results[0]):

            return self.wrap_results_for_axis()

        # dict of scalars
        result = self.obj._constructor_sliced(results)
        result.index = self.res_index

        return result


class FrameRowApply(FrameApply):
    axis = 0

    def apply_broadcast(self):
        return super().apply_broadcast(self.obj)

    @property
    def series_generator(self):
        return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))

    @property
    def result_index(self):
        return self.columns

    @property
    def result_columns(self):
        return self.index

    def wrap_results_for_axis(self):
        """ return the results for the rows """
        results = self.results
        result = self.obj._constructor(data=results)

        if not isinstance(results[0], ABCSeries):
            if len(result.index) == len(self.res_columns):
                result.index = self.res_columns

        if len(result.columns) == len(self.res_index):
            result.columns = self.res_index

        return result


class FrameColumnApply(FrameApply):
    axis = 1

    def apply_broadcast(self):
        result = super().apply_broadcast(self.obj.T)
        return result.T

    @property
    def series_generator(self):
        constructor = self.obj._constructor_sliced
        return (
            constructor(arr, index=self.columns, name=name)
            for i, (arr, name) in enumerate(zip(self.values, self.index))
        )

    @property
    def result_index(self):
        return self.index

    @property
    def result_columns(self):
        return self.columns

    def wrap_results_for_axis(self):
        """ return the results for the columns """
        results = self.results

        # we have requested to expand
        if self.result_type == "expand":
            result = self.infer_to_same_shape()

        # we have a non-series and don't want inference
        elif not isinstance(results[0], ABCSeries):
            from pandas import Series

            result = Series(results)
            result.index = self.res_index

        # we may want to infer results
        else:
            result = self.infer_to_same_shape()

        return result

    def infer_to_same_shape(self):
        """ infer the results to the same shape as the input object """
        results = self.results

        result = self.obj._constructor(data=results)
        result = result.T

        # set the index
        result.index = self.res_index

        # infer dtypes
        result = result.infer_objects()

        return result
2.171875
2
tests/test_model/test_recognizer/test_shufflenetv1.py
YinAoXiong/ZCls
0
128
<reponame>YinAoXiong/ZCls
# -*- coding: utf-8 -*-

"""
@date: 2021/5/16 下午10:22
@file: test_shufflenetv1.py
@author: zj
@description:
"""

import torch

from zcls.config import cfg
from zcls.config.key_word import KEY_OUTPUT
from zcls.model.recognizers.build import build_recognizer


def test_data(model):
    data = torch.randn(1, 3, 224, 224)
    outputs = model(data)[KEY_OUTPUT]
    print(outputs.shape)

    assert outputs.shape == (1, 1000)


def test_shufflenet():
    cfg.merge_from_file('configs/benchmarks/shufflenet/shufflenet_v1_3g2x_zcls_imagenet_224.yaml')
    print(cfg)

    model = build_recognizer(cfg, torch.device('cpu'))
    print(model)

    test_data(model)


if __name__ == '__main__':
    test_shufflenet()
2.15625
2
autotest/gcore/vsis3.py
jpapadakis/gdal
18
129
<filename>autotest/gcore/vsis3.py #!/usr/bin/env pytest ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: Test /vsis3 # Author: <NAME> <even dot rouault at spatialys dot com> # ############################################################################### # Copyright (c) 2015, <NAME> <even dot rouault at spatialys dot com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### import json import os.path import stat import sys from osgeo import gdal import gdaltest import webserver import pytest def open_for_read(uri): """ Opens a test file for reading. """ return gdal.VSIFOpenExL(uri, 'rb', 1) ############################################################################### def test_vsis3_init(): gdaltest.aws_vars = {} for var in ('AWS_SECRET_ACCESS_KEY', 'AWS_ACCESS_KEY_ID', 'AWS_TIMESTAMP', 'AWS_HTTPS', 'AWS_VIRTUAL_HOSTING', 'AWS_S3_ENDPOINT', 'AWS_REQUEST_PAYER', 'AWS_DEFAULT_REGION', 'AWS_DEFAULT_PROFILE', 'AWS_PROFILE', 'AWS_NO_SIGN_REQUEST'): gdaltest.aws_vars[var] = gdal.GetConfigOption(var) if gdaltest.aws_vars[var] is not None: gdal.SetConfigOption(var, "") # To avoid user AWS credentials in ~/.aws/credentials and ~/.aws/config # to mess up our tests gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', '') gdal.SetConfigOption('AWS_CONFIG_FILE', '') gdal.SetConfigOption('CPL_AWS_EC2_API_ROOT_URL', '') assert gdal.GetSignedURL('/vsis3/foo/bar') is None ############################################################################### # Test AWS_NO_SIGN_REQUEST=YES def test_vsis3_no_sign_request(): if not gdaltest.built_against_curl(): pytest.skip() with gdaltest.config_option('AWS_NO_SIGN_REQUEST', 'YES'): actual_url = gdal.GetActualURL('/vsis3/landsat-pds/L8/001/002/LC80010022016230LGN00/LC80010022016230LGN00_B1.TIF') assert actual_url == 'https://landsat-pds.s3.amazonaws.com/L8/001/002/LC80010022016230LGN00/LC80010022016230LGN00_B1.TIF' actual_url = gdal.GetActualURL('/vsis3_streaming/landsat-pds/L8/001/002/LC80010022016230LGN00/LC80010022016230LGN00_B1.TIF') assert actual_url == 'https://landsat-pds.s3.amazonaws.com/L8/001/002/LC80010022016230LGN00/LC80010022016230LGN00_B1.TIF' f = open_for_read('/vsis3/landsat-pds/L8/001/002/LC80010022016230LGN00/LC80010022016230LGN00_B1.TIF') if f is None: if gdaltest.gdalurlopen('https://landsat-pds.s3.amazonaws.com/L8/001/002/LC80010022016230LGN00/LC80010022016230LGN00_B1.TIF') is None: pytest.skip('cannot 
open URL') pytest.fail() gdal.VSIFCloseL(f) ############################################################################### # Test Sync() and multithreaded download def test_vsis3_sync_multithreaded_download(): if not gdaltest.built_against_curl(): pytest.skip() def cbk(pct, _, tab): assert pct >= tab[0] tab[0] = pct return True tab = [ -1 ] # Use a public bucket with /test_dummy/foo and /test_dummy/bar files with gdaltest.config_option('AWS_NO_SIGN_REQUEST', 'YES'): assert gdal.Sync('/vsis3/cdn.proj.org/test_dummy', '/vsimem/test_vsis3_no_sign_request_sync', options=['NUM_THREADS=2'], callback=cbk, callback_data=tab) assert tab[0] == 1.0 assert gdal.VSIStatL('/vsimem/test_vsis3_no_sign_request_sync/test_dummy/foo').size == 4 assert gdal.VSIStatL('/vsimem/test_vsis3_no_sign_request_sync/test_dummy/bar').size == 4 gdal.RmdirRecursive('/vsimem/test_vsis3_no_sign_request_sync') ############################################################################### # Test Sync() and multithreaded download and CHUNK_SIZE def test_vsis3_sync_multithreaded_download_chunk_size(): if not gdaltest.built_against_curl(): pytest.skip() def cbk(pct, _, tab): assert pct >= tab[0] tab[0] = pct return True tab = [ -1 ] # Use a public bucket with /test_dummy/foo and /test_dummy/bar files with gdaltest.config_option('AWS_NO_SIGN_REQUEST', 'YES'): assert gdal.Sync('/vsis3/cdn.proj.org/test_dummy', '/vsimem/test_vsis3_no_sign_request_sync', options=['NUM_THREADS=2', 'CHUNK_SIZE=3'], callback=cbk, callback_data=tab) assert tab[0] == 1.0 assert gdal.VSIStatL('/vsimem/test_vsis3_no_sign_request_sync/test_dummy/foo').size == 4 assert gdal.VSIStatL('/vsimem/test_vsis3_no_sign_request_sync/test_dummy/bar').size == 4 gdal.RmdirRecursive('/vsimem/test_vsis3_no_sign_request_sync') ############################################################################### # Error cases def test_vsis3_1(): if not gdaltest.built_against_curl(): pytest.skip() # Missing AWS_SECRET_ACCESS_KEY gdal.ErrorReset() with gdaltest.error_handler(): f = open_for_read('/vsis3/foo/bar') assert f is None and gdal.VSIGetLastErrorMsg().find('AWS_SECRET_ACCESS_KEY') >= 0 gdal.ErrorReset() with gdaltest.error_handler(): f = open_for_read('/vsis3_streaming/foo/bar') assert f is None and gdal.VSIGetLastErrorMsg().find('AWS_SECRET_ACCESS_KEY') >= 0 gdal.SetConfigOption('AWS_SECRET_ACCESS_KEY', 'AWS_SECRET_ACCESS_KEY') # Missing AWS_ACCESS_KEY_ID gdal.ErrorReset() with gdaltest.error_handler(): f = open_for_read('/vsis3/foo/bar') assert f is None and gdal.VSIGetLastErrorMsg().find('AWS_ACCESS_KEY_ID') >= 0 gdal.SetConfigOption('AWS_ACCESS_KEY_ID', 'AWS_ACCESS_KEY_ID') # ERROR 1: The AWS Access Key Id you provided does not exist in our records. 
gdal.ErrorReset() with gdaltest.error_handler(): f = open_for_read('/vsis3/foo/bar.baz') if f is not None or gdal.VSIGetLastErrorMsg() == '': if f is not None: gdal.VSIFCloseL(f) if gdal.GetConfigOption('APPVEYOR') is not None: return pytest.fail(gdal.VSIGetLastErrorMsg()) gdal.ErrorReset() with gdaltest.error_handler(): f = open_for_read('/vsis3_streaming/foo/bar.baz') assert f is None and gdal.VSIGetLastErrorMsg() != '' ############################################################################### def test_vsis3_start_webserver(): gdaltest.webserver_process = None gdaltest.webserver_port = 0 if not gdaltest.built_against_curl(): pytest.skip() (gdaltest.webserver_process, gdaltest.webserver_port) = webserver.launch(handler=webserver.DispatcherHttpHandler) if gdaltest.webserver_port == 0: pytest.skip() gdal.SetConfigOption('AWS_SECRET_ACCESS_KEY', 'AWS_SECRET_ACCESS_KEY') gdal.SetConfigOption('AWS_ACCESS_KEY_ID', 'AWS_ACCESS_KEY_ID') gdal.SetConfigOption('AWS_TIMESTAMP', '20150101T000000Z') gdal.SetConfigOption('AWS_HTTPS', 'NO') gdal.SetConfigOption('AWS_VIRTUAL_HOSTING', 'NO') gdal.SetConfigOption('AWS_S3_ENDPOINT', '127.0.0.1:%d' % gdaltest.webserver_port) def get_s3_fake_bucket_resource_method(request): request.protocol_version = 'HTTP/1.1' if 'Authorization' not in request.headers: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) return expected_authorization_8080 = 'AWS4-HMAC-SHA256 Credential=AWS_ACCESS_KEY_ID/20150101/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=38901846b865b12ac492bc005bb394ca8d60c098b68db57c084fac686a932f9e' expected_authorization_8081 = 'AWS4-HMAC-SHA256 Credential=AWS_ACCESS_KEY_ID/20150101/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=9f623b7ffce76188a456c70fb4813eb31969e88d130d6b4d801b3accbf050d6c' if request.headers['Authorization'] != expected_authorization_8080 and request.headers['Authorization'] != expected_authorization_8081: sys.stderr.write("Bad Authorization: '%s'\n" % str(request.headers['Authorization'])) request.send_response(403) return request.send_response(200) request.send_header('Content-type', 'text/plain') request.send_header('Content-Length', 3) request.send_header('Connection', 'close') request.end_headers() request.wfile.write("""foo""".encode('ascii')) ############################################################################### # Test with a fake AWS server def test_vsis3_2(): if gdaltest.webserver_port == 0: pytest.skip() signed_url = gdal.GetSignedURL('/vsis3/s3_fake_bucket/resource') expected_url_8080 = 'http://127.0.0.1:8080/s3_fake_bucket/resource?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AWS_ACCESS_KEY_ID%2F20150101%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20150101T000000Z&X-Amz-Expires=3600&X-Amz-Signature=dca239dd95f72ff8c37c15c840afc54cd19bdb07f7aaee2223108b5b0ad35da8&X-Amz-SignedHeaders=host' expected_url_8081 = 'http://127.0.0.1:8081/s3_fake_bucket/resource?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AWS_ACCESS_KEY_ID%2F20150101%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20150101T000000Z&X-Amz-Expires=3600&X-Amz-Signature=ef5216bc5971863414c69f6ca095276c0d62c0da97fa4f6ab80c30bd7fc146ac&X-Amz-SignedHeaders=host' assert signed_url in (expected_url_8080, expected_url_8081) handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket/resource', custom_method=get_s3_fake_bucket_resource_method) with webserver.install_http_handler(handler): f = 
open_for_read('/vsis3/s3_fake_bucket/resource') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket/resource', custom_method=get_s3_fake_bucket_resource_method) with webserver.install_http_handler(handler): f = open_for_read('/vsis3_streaming/s3_fake_bucket/resource') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' handler = webserver.SequentialHandler() def method(request): request.protocol_version = 'HTTP/1.1' if 'Authorization' not in request.headers: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) return expected_authorization_8080 = 'AWS4-HMAC-SHA256 Credential=AWS_ACCESS_KEY_ID/20150101/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-security-token,Signature=464a21835038b4f4d292b6463b8a005b9aaa980513aa8c42fc170abb733dce85' expected_authorization_8081 = 'AWS4-HMAC-SHA256 Credential=AWS_ACCESS_KEY_ID/20150101/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-security-token,Signature=b10e91575186342f9f2acfc91c4c2c9938c4a9e8cdcbc043d09d59d9641ad7fb' if request.headers['Authorization'] != expected_authorization_8080 and request.headers['Authorization'] != expected_authorization_8081: sys.stderr.write("Bad Authorization: '%s'\n" % str(request.headers['Authorization'])) request.send_response(403) return request.send_response(200) request.send_header('Content-type', 'text/plain') request.send_header('Content-Length', 3) request.end_headers() request.wfile.write("""foo""".encode('ascii')) handler.add('GET', '/s3_fake_bucket_with_session_token/resource', custom_method=method) # Test with temporary credentials with gdaltest.config_option('AWS_SESSION_TOKEN', 'AWS_SESSION_TOKEN'): with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_fake_bucket_with_session_token/resource') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) handler = webserver.SequentialHandler() def method(request): request.protocol_version = 'HTTP/1.1' if 'Range' in request.headers: if request.headers['Range'] != 'bytes=0-16383': sys.stderr.write("Bad Range: '%s'\n" % str(request.headers['Range'])) request.send_response(403) return request.send_response(206) request.send_header('Content-type', 'text/plain') request.send_header('Content-Range', 'bytes 0-16383/1000000') request.send_header('Content-Length', 16384) request.send_header('Connection', 'close') request.end_headers() request.wfile.write(('a' * 16384).encode('ascii')) else: request.send_response(200) request.send_header('Content-type', 'text/plain') request.send_header('Content-Length', 1000000) request.send_header('Connection', 'close') request.end_headers() request.wfile.write(('a' * 1000000).encode('ascii')) handler.add('GET', '/s3_fake_bucket/resource2.bin', custom_method=method) with webserver.install_http_handler(handler): # old_val = gdal.GetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN') # gdal.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', 'EMPTY_DIR') stat_res = gdal.VSIStatL('/vsis3/s3_fake_bucket/resource2.bin') # gdal.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', old_val) if stat_res is None or stat_res.size != 1000000: if stat_res is not None: print(stat_res.size) else: print(stat_res) pytest.fail() handler = webserver.SequentialHandler() handler.add('HEAD', '/s3_fake_bucket/resource2.bin', 200, 
{'Content-type': 'text/plain', 'Content-Length': 1000000, 'Connection': 'close'}) with webserver.install_http_handler(handler): stat_res = gdal.VSIStatL('/vsis3_streaming/s3_fake_bucket/resource2.bin') if stat_res is None or stat_res.size != 1000000: if stat_res is not None: print(stat_res.size) else: print(stat_res) pytest.fail() handler = webserver.SequentialHandler() def method(request): request.protocol_version = 'HTTP/1.1' if request.headers['Authorization'].find('us-east-1') >= 0: request.send_response(400) response = '<?xml version="1.0" encoding="UTF-8"?><Error><Message>bla</Message><Code>AuthorizationHeaderMalformed</Code><Region>us-west-2</Region></Error>' response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) request.send_header('Content-type', 'application/xml') request.send_header('Transfer-Encoding', 'chunked') request.send_header('Connection', 'close') request.end_headers() request.wfile.write(response.encode('ascii')) else: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) handler.add('GET', '/s3_fake_bucket/redirect', custom_method=method) def method(request): request.protocol_version = 'HTTP/1.1' if request.headers['Authorization'].find('us-west-2') >= 0 and request.headers['Host'].startswith('127.0.0.1'): request.send_response(301) response = '<?xml version="1.0" encoding="UTF-8"?><Error><Message>bla</Message><Code>PermanentRedirect</Code><Endpoint>localhost:%d</Endpoint></Error>' % request.server.port response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) request.send_header('Content-type', 'application/xml') request.send_header('Transfer-Encoding', 'chunked') request.send_header('Connection', 'close') request.end_headers() request.wfile.write(response.encode('ascii')) else: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) handler.add('GET', '/s3_fake_bucket/redirect', custom_method=method) def method(request): request.protocol_version = 'HTTP/1.1' if request.headers['Authorization'].find('us-west-2') >= 0 and request.headers['Host'].startswith('localhost'): request.send_response(200) request.send_header('Content-type', 'text/plain') request.send_header('Content-Length', 3) request.send_header('Connection', 'close') request.end_headers() request.wfile.write("""foo""".encode('ascii')) else: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) handler.add('GET', '/s3_fake_bucket/redirect', custom_method=method) # Test region and endpoint 'redirects' with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_fake_bucket/redirect') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) if data != 'foo': if gdaltest.is_travis_branch('trusty'): pytest.skip('Skipped on trusty branch, but should be investigated') pytest.fail(data) # Test region and endpoint 'redirects' gdal.VSICurlClearCache() handler.req_count = 0 with webserver.install_http_handler(handler): f = open_for_read('/vsis3_streaming/s3_fake_bucket/redirect') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' handler = webserver.SequentialHandler() def method(request): # /vsis3_streaming/ should have remembered the change of region and endpoint if request.headers['Authorization'].find('us-west-2') < 0 or \ not request.headers['Host'].startswith('localhost'): sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) request.protocol_version = 'HTTP/1.1' 
request.send_response(400) response = 'bla' response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) request.send_header('Content-type', 'application/xml') request.send_header('Transfer-Encoding', 'chunked') request.send_header('Connection', 'close') request.end_headers() request.wfile.write(response.encode('ascii')) handler.add('GET', '/s3_fake_bucket/non_xml_error', custom_method=method) gdal.ErrorReset() with webserver.install_http_handler(handler): with gdaltest.error_handler(): f = open_for_read('/vsis3_streaming/s3_fake_bucket/non_xml_error') assert f is None and gdal.VSIGetLastErrorMsg().find('bla') >= 0 handler = webserver.SequentialHandler() response = '<?xml version="1.0" encoding="UTF-8"?><oops>' response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) handler.add('GET', '/s3_fake_bucket/invalid_xml_error', 400, {'Content-type': 'application/xml', 'Transfer-Encoding': 'chunked', 'Connection': 'close'}, response) gdal.ErrorReset() with webserver.install_http_handler(handler): with gdaltest.error_handler(): f = open_for_read('/vsis3_streaming/s3_fake_bucket/invalid_xml_error') assert f is None and gdal.VSIGetLastErrorMsg().find('<oops>') >= 0 handler = webserver.SequentialHandler() response = '<?xml version="1.0" encoding="UTF-8"?><Error/>' response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) handler.add('GET', '/s3_fake_bucket/no_code_in_error', 400, {'Content-type': 'application/xml', 'Transfer-Encoding': 'chunked', 'Connection': 'close'}, response) gdal.ErrorReset() with webserver.install_http_handler(handler): with gdaltest.error_handler(): f = open_for_read('/vsis3_streaming/s3_fake_bucket/no_code_in_error') assert f is None and gdal.VSIGetLastErrorMsg().find('<Error/>') >= 0 handler = webserver.SequentialHandler() response = '<?xml version="1.0" encoding="UTF-8"?><Error><Code>AuthorizationHeaderMalformed</Code></Error>' response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) handler.add('GET', '/s3_fake_bucket/no_region_in_AuthorizationHeaderMalformed_error', 400, {'Content-type': 'application/xml', 'Transfer-Encoding': 'chunked', 'Connection': 'close'}, response) gdal.ErrorReset() with webserver.install_http_handler(handler): with gdaltest.error_handler(): f = open_for_read('/vsis3_streaming/s3_fake_bucket/no_region_in_AuthorizationHeaderMalformed_error') assert f is None and gdal.VSIGetLastErrorMsg().find('<Error>') >= 0 handler = webserver.SequentialHandler() response = '<?xml version="1.0" encoding="UTF-8"?><Error><Code>PermanentRedirect</Code></Error>' response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) handler.add('GET', '/s3_fake_bucket/no_endpoint_in_PermanentRedirect_error', 400, {'Content-type': 'application/xml', 'Transfer-Encoding': 'chunked', 'Connection': 'close'}, response) gdal.ErrorReset() with webserver.install_http_handler(handler): with gdaltest.error_handler(): f = open_for_read('/vsis3_streaming/s3_fake_bucket/no_endpoint_in_PermanentRedirect_error') assert f is None and gdal.VSIGetLastErrorMsg().find('<Error>') >= 0 handler = webserver.SequentialHandler() response = '<?xml version="1.0" encoding="UTF-8"?><Error><Code>bla</Code></Error>' response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) handler.add('GET', '/s3_fake_bucket/no_message_in_error', 400, {'Content-type': 'application/xml', 'Transfer-Encoding': 'chunked', 'Connection': 'close'}, response) gdal.ErrorReset() with webserver.install_http_handler(handler): with gdaltest.error_handler(): f = 
open_for_read('/vsis3_streaming/s3_fake_bucket/no_message_in_error') assert f is None and gdal.VSIGetLastErrorMsg().find('<Error>') >= 0 # Test with requester pays handler = webserver.SequentialHandler() def method(request): if 'x-amz-request-payer' not in request.headers: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) return expected_authorization_8080 = 'AWS4-HMAC-SHA256 Credential=AWS_ACCESS_KEY_ID/20150101/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-request-payer,Signature=cf713a394e1b629ac0e468d60d3d4a12f5236fd72d21b6005c758b0dfc7049cd' expected_authorization_8081 = 'AWS4-HMAC-SHA256 Credential=AWS_ACCESS_KEY_ID/20150101/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-request-payer,Signature=4756166679008a1a40cd6ff91dbbef670a71c11bf8e3c998dd7385577c3ac4d9' if request.headers['Authorization'] != expected_authorization_8080 and request.headers['Authorization'] != expected_authorization_8081: sys.stderr.write("Bad Authorization: '%s'\n" % str(request.headers['Authorization'])) request.send_response(403) return if request.headers['x-amz-request-payer'] != 'requester': sys.stderr.write("Bad x-amz-request-payer: '%s'\n" % str(request.headers['x-amz-request-payer'])) request.send_response(403) return request.send_response(200) request.send_header('Content-type', 'text/plain') request.send_header('Content-Length', 3) request.send_header('Connection', 'close') request.end_headers() request.wfile.write("""foo""".encode('ascii')) handler.add('GET', '/s3_fake_bucket_with_requester_pays/resource', custom_method=method) with gdaltest.config_option('AWS_REQUEST_PAYER', 'requester'): with webserver.install_http_handler(handler): with gdaltest.error_handler(): f = open_for_read('/vsis3/s3_fake_bucket_with_requester_pays/resource') assert f is not None data = gdal.VSIFReadL(1, 3, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' # Test temporary redirect handler = webserver.SequentialHandler() class HandlerClass(object): def __init__(self, response_value): self.old_authorization = None self.response_value = response_value def method_req_1(self, request): if request.headers['Host'].find('127.0.0.1') < 0: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) return self.old_authorization = request.headers['Authorization'] request.protocol_version = 'HTTP/1.1' request.send_response(307) response = '<?xml version="1.0" encoding="UTF-8"?><Error><Message>bla</Message><Code>TemporaryRedirect</Code><Endpoint>localhost:%d</Endpoint></Error>' % request.server.port response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) request.send_header('Content-type', 'application/xml') request.send_header('Transfer-Encoding', 'chunked') request.end_headers() request.wfile.write(response.encode('ascii')) def method_req_2(self, request): if request.headers['Host'].find('localhost') < 0: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) return if self.old_authorization == request.headers['Authorization']: sys.stderr.write('Should have get a different Authorization. 
Bad headers: %s\n' % str(request.headers)) request.send_response(403) return request.protocol_version = 'HTTP/1.1' request.send_response(200) response = self.response_value request.send_header('Content-Length', len(response)) request.end_headers() request.wfile.write(response.encode('ascii')) h = HandlerClass('foo') handler.add('GET', '/s3_test_temporary_redirect_read/resource', custom_method=h.method_req_1) handler.add('GET', '/s3_test_temporary_redirect_read/resource', custom_method=h.method_req_2) with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_test_temporary_redirect_read/resource') assert f is not None data = gdal.VSIFReadL(1, 3, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' # Retry on the same bucket and check that the redirection was indeed temporary handler = webserver.SequentialHandler() h = HandlerClass('bar') handler.add('GET', '/s3_test_temporary_redirect_read/resource2', custom_method=h.method_req_1) handler.add('GET', '/s3_test_temporary_redirect_read/resource2', custom_method=h.method_req_2) with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_test_temporary_redirect_read/resource2') assert f is not None data = gdal.VSIFReadL(1, 3, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'bar' ############################################################################### # Test re-opening after changing configuration option (#2294) def test_vsis3_open_after_config_option_chage(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/test_vsis3_change_config_options/?delimiter=%2F', 403) handler.add('GET', '/test_vsis3_change_config_options/test.bin', 403) with webserver.install_http_handler(handler): with gdaltest.error_handler(): f = open_for_read('/vsis3/test_vsis3_change_config_options/test.bin') assert f is None # Does not attempt any network access since we didn't change significant # parameters f = open_for_read('/vsis3/test_vsis3_change_config_options/test.bin') assert f is None with gdaltest.config_option('AWS_ACCESS_KEY_ID', 'another_key_id'): handler = webserver.SequentialHandler() handler.add('GET', '/test_vsis3_change_config_options/?delimiter=%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix></Prefix> <Contents> <Key>test.bin</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>123456</Size> </Contents> </ListBucketResult> """) with webserver.install_http_handler(handler): f = open_for_read('/vsis3/test_vsis3_change_config_options/test.bin') assert f is not None gdal.VSIFCloseL(f) ############################################################################### # Test ReadDir() with a fake AWS server def test_vsis3_readdir(): if gdaltest.webserver_port == 0: pytest.skip() handler = webserver.SequentialHandler() def method(request): request.protocol_version = 'HTTP/1.1' if request.headers['Authorization'].find('us-east-1') >= 0: request.send_response(400) response = '<?xml version="1.0" encoding="UTF-8"?><Error><Message>bla</Message><Code>AuthorizationHeaderMalformed</Code><Region>us-west-2</Region></Error>' response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) request.send_header('Content-type', 'application/xml') request.send_header('Transfer-Encoding', 'chunked') request.end_headers() request.wfile.write(response.encode('ascii')) elif request.headers['Authorization'].find('us-west-2') >= 0: if 
request.headers['Host'].startswith('127.0.0.1'): request.send_response(301) response = '<?xml version="1.0" encoding="UTF-8"?><Error><Message>bla</Message><Code>PermanentRedirect</Code><Endpoint>localhost:%d</Endpoint></Error>' % request.server.port response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) request.send_header('Content-type', 'application/xml') request.send_header('Transfer-Encoding', 'chunked') request.end_headers() request.wfile.write(response.encode('ascii')) elif request.headers['Host'].startswith('localhost'): request.send_response(200) request.send_header('Content-type', 'application/xml') response = """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>a_dir with_space/</Prefix> <NextMarker>bla</NextMarker> <Contents> <Key>a_dir with_space/resource3 with_space.bin</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>123456</Size> </Contents> </ListBucketResult> """ request.send_header('Content-Length', len(response)) request.end_headers() request.wfile.write(response.encode('ascii')) else: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) else: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) handler.add('GET', '/s3_fake_bucket2/?delimiter=%2F&prefix=a_dir%20with_space%2F', custom_method=method) handler.add('GET', '/s3_fake_bucket2/?delimiter=%2F&prefix=a_dir%20with_space%2F', custom_method=method) handler.add('GET', '/s3_fake_bucket2/?delimiter=%2F&prefix=a_dir%20with_space%2F', custom_method=method) def method(request): # /vsis3/ should have remembered the change of region and endpoint if request.headers['Authorization'].find('us-west-2') < 0 or \ not request.headers['Host'].startswith('localhost'): sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) request.protocol_version = 'HTTP/1.1' request.send_response(200) request.send_header('Content-type', 'application/xml') response = """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>a_dir with_space/</Prefix> <Contents> <Key>a_dir with_space/resource4.bin</Key> <LastModified>2015-10-16T12:34:56.000Z</LastModified> <Size>456789</Size> </Contents> <Contents> <Key>a_dir with_space/i_am_a_glacier_file</Key> <LastModified>2015-10-16T12:34:56.000Z</LastModified> <Size>456789</Size> <StorageClass>GLACIER</StorageClass> </Contents> <CommonPrefixes> <Prefix>a_dir with_space/subdir/</Prefix> </CommonPrefixes> </ListBucketResult> """ request.send_header('Content-Length', len(response)) request.end_headers() request.wfile.write(response.encode('ascii')) handler.add('GET', '/s3_fake_bucket2/?delimiter=%2F&marker=bla&prefix=a_dir%20with_space%2F', custom_method=method) with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_fake_bucket2/a_dir with_space/resource3 with_space.bin') if f is None: if gdaltest.is_travis_branch('trusty'): pytest.skip('Skipped on trusty branch, but should be investigated') pytest.fail() gdal.VSIFCloseL(f) with webserver.install_http_handler(webserver.SequentialHandler()): dir_contents = gdal.ReadDir('/vsis3/s3_fake_bucket2/a_dir with_space') assert dir_contents == ['resource3 with_space.bin', 'resource4.bin', 'subdir'] assert gdal.VSIStatL('/vsis3/s3_fake_bucket2/a_dir with_space/resource3 with_space.bin').size == 123456 assert gdal.VSIStatL('/vsis3/s3_fake_bucket2/a_dir with_space/resource3 with_space.bin').mtime == 1 # Same as above: cached dir_contents = gdal.ReadDir('/vsis3/s3_fake_bucket2/a_dir with_space') assert 
dir_contents == ['resource3 with_space.bin', 'resource4.bin', 'subdir'] # ReadDir on something known to be a file shouldn't cause network access dir_contents = gdal.ReadDir('/vsis3/s3_fake_bucket2/a_dir with_space/resource3 with_space.bin') assert dir_contents is None # Test unrelated partial clear of the cache gdal.VSICurlPartialClearCache('/vsis3/s3_fake_bucket_unrelated') assert gdal.VSIStatL('/vsis3/s3_fake_bucket2/a_dir with_space/resource3 with_space.bin').size == 123456 dir_contents = gdal.ReadDir('/vsis3/s3_fake_bucket2/a_dir with_space') assert dir_contents == ['resource3 with_space.bin', 'resource4.bin', 'subdir'] # Test partial clear of the cache gdal.VSICurlPartialClearCache('/vsis3/s3_fake_bucket2/a_dir with_space') handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket2/a_dir%20with_space/resource3%20with_space.bin', 400) handler.add('GET', '/s3_fake_bucket2/?delimiter=%2F&max-keys=100&prefix=a_dir%20with_space%2Fresource3%20with_space.bin%2F', 400) with webserver.install_http_handler(handler): gdal.VSIStatL('/vsis3/s3_fake_bucket2/a_dir with_space/resource3 with_space.bin') handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket2/?delimiter=%2F&prefix=a_dir%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>a_dir/</Prefix> <Contents> <Key>a_dir/test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> </ListBucketResult> """) with webserver.install_http_handler(handler): dir_contents = gdal.ReadDir('/vsis3/s3_fake_bucket2/a_dir') assert dir_contents == ['test.txt'] gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket2/?delimiter=%2F&prefix=a_dir%2F', 200, {}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>a_dir/</Prefix> <Contents> <Key>a_dir/resource4.bin</Key> <LastModified>2015-10-16T12:34:56.000Z</LastModified> <Size>456789</Size> </Contents> <Contents> <Key>a_dir/i_am_a_glacier_file</Key> <LastModified>2015-10-16T12:34:56.000Z</LastModified> <Size>456789</Size> <StorageClass>GLACIER</StorageClass> </Contents> <CommonPrefixes> <Prefix>a_dir/subdir/</Prefix> </CommonPrefixes> </ListBucketResult> """) with gdaltest.config_option('CPL_VSIL_CURL_IGNORE_GLACIER_STORAGE', 'NO'): with webserver.install_http_handler(handler): dir_contents = gdal.ReadDir('/vsis3/s3_fake_bucket2/a_dir') assert dir_contents == ['resource4.bin', 'i_am_a_glacier_file', 'subdir'] # Test CPL_VSIL_CURL_NON_CACHED for config_option_value in ['/vsis3/s3_non_cached/test.txt', '/vsis3/s3_non_cached', '/vsis3/s3_non_cached:/vsis3/unrelated', '/vsis3/unrelated:/vsis3/s3_non_cached', '/vsis3/unrelated:/vsis3/s3_non_cached:/vsis3/unrelated']: with gdaltest.config_option('CPL_VSIL_CURL_NON_CACHED', config_option_value): handler = webserver.SequentialHandler() handler.add('GET', '/s3_non_cached/test.txt', 200, {}, 'foo') with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_non_cached/test.txt') assert f is not None, config_option_value data = gdal.VSIFReadL(1, 3, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo', config_option_value handler = webserver.SequentialHandler() handler.add('GET', '/s3_non_cached/test.txt', 200, {}, 'bar2') with webserver.install_http_handler(handler): size = gdal.VSIStatL('/vsis3/s3_non_cached/test.txt').size assert size == 4, config_option_value handler = webserver.SequentialHandler() handler.add('GET', '/s3_non_cached/test.txt', 200, {}, 
'foo') with webserver.install_http_handler(handler): size = gdal.VSIStatL('/vsis3/s3_non_cached/test.txt').size if size != 3: print(config_option_value) pytest.fail(data) handler = webserver.SequentialHandler() handler.add('GET', '/s3_non_cached/test.txt', 200, {}, 'bar2') with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_non_cached/test.txt') assert f is not None, config_option_value data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'bar2', config_option_value # Retry without option for config_option_value in [None, '/vsis3/s3_non_cached/bar.txt']: with gdaltest.config_option('CPL_VSIL_CURL_NON_CACHED', config_option_value): handler = webserver.SequentialHandler() if config_option_value is None: handler.add('GET', '/s3_non_cached/?delimiter=%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix></Prefix> <Contents> <Key>test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> <Contents> <Key>test2.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> </ListBucketResult> """) handler.add('GET', '/s3_non_cached/test.txt', 200, {}, 'foo') with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_non_cached/test.txt') assert f is not None, config_option_value data = gdal.VSIFReadL(1, 3, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo', config_option_value handler = webserver.SequentialHandler() with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_non_cached/test.txt') assert f is not None, config_option_value data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) # We should still get foo because of caching assert data == 'foo', config_option_value # List buckets (empty result) handler = webserver.SequentialHandler() handler.add('GET', '/', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListAllMyBucketsResult> <Buckets> </Buckets> </ListAllMyBucketsResult> """) with webserver.install_http_handler(handler): dir_contents = gdal.ReadDir('/vsis3/') assert dir_contents == ['.'] gdal.VSICurlClearCache() # List buckets handler = webserver.SequentialHandler() handler.add('GET', '/', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListAllMyBucketsResult> <Buckets> <Bucket> <Name>mybucket</Name> </Bucket> </Buckets> </ListAllMyBucketsResult> """) with webserver.install_http_handler(handler): dir_contents = gdal.ReadDir('/vsis3/') assert dir_contents == ['mybucket'] # Test temporary redirect handler = webserver.SequentialHandler() class HandlerClass(object): def __init__(self, response_value): self.old_authorization = None self.response_value = response_value def method_req_1(self, request): if request.headers['Host'].find('127.0.0.1') < 0: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) return self.old_authorization = request.headers['Authorization'] request.protocol_version = 'HTTP/1.1' request.send_response(307) response = '<?xml version="1.0" encoding="UTF-8"?><Error><Message>bla</Message><Code>TemporaryRedirect</Code><Endpoint>localhost:%d</Endpoint></Error>' % request.server.port response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) request.send_header('Content-type', 'application/xml') request.send_header('Transfer-Encoding', 'chunked') request.end_headers() request.wfile.write(response.encode('ascii')) def 
method_req_2(self, request): if request.headers['Host'].find('localhost') < 0: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) return if self.old_authorization == request.headers['Authorization']: sys.stderr.write('Should have get a different Authorization. Bad headers: %s\n' % str(request.headers)) request.send_response(403) return request.protocol_version = 'HTTP/1.1' request.send_response(200) request.send_header('Content-type', 'application/xml') response = self.response_value request.send_header('Content-Length', len(response)) request.end_headers() request.wfile.write(response.encode('ascii')) h = HandlerClass("""<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix></Prefix> <CommonPrefixes> <Prefix>test</Prefix> </CommonPrefixes> </ListBucketResult> """) handler.add('GET', '/s3_test_temporary_redirect_read_dir/?delimiter=%2F', custom_method=h.method_req_1) handler.add('GET', '/s3_test_temporary_redirect_read_dir/?delimiter=%2F', custom_method=h.method_req_2) with webserver.install_http_handler(handler): dir_contents = gdal.ReadDir('/vsis3/s3_test_temporary_redirect_read_dir') assert dir_contents == ['test'] # Retry on the same bucket and check that the redirection was indeed temporary handler = webserver.SequentialHandler() h = HandlerClass("""<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>test/</Prefix> <CommonPrefixes> <Prefix>test/test2</Prefix> </CommonPrefixes> </ListBucketResult> """) handler.add('GET', '/s3_test_temporary_redirect_read_dir/?delimiter=%2F&prefix=test%2F', custom_method=h.method_req_1) handler.add('GET', '/s3_test_temporary_redirect_read_dir/?delimiter=%2F&prefix=test%2F', custom_method=h.method_req_2) with webserver.install_http_handler(handler): dir_contents = gdal.ReadDir('/vsis3/s3_test_temporary_redirect_read_dir/test') assert dir_contents == ['test2'] ############################################################################### # Test OpenDir() with a fake AWS server def test_vsis3_opendir(): if gdaltest.webserver_port == 0: pytest.skip() # Unlimited depth handler = webserver.SequentialHandler() handler.add('GET', '/vsis3_opendir/', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix/> <Marker/> <Contents> <Key>test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> <Contents> <Key>subdir/</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>0</Size> </Contents> <Contents> <Key>subdir/test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>5</Size> </Contents> </ListBucketResult> """) with webserver.install_http_handler(handler): d = gdal.OpenDir('/vsis3/vsis3_opendir') assert d is not None entry = gdal.GetNextDirEntry(d) assert entry.name == 'test.txt' assert entry.size == 40 assert entry.mode == 32768 assert entry.mtime == 1 entry = gdal.GetNextDirEntry(d) assert entry.name == 'subdir' assert entry.mode == 16384 entry = gdal.GetNextDirEntry(d) assert entry.name == 'subdir/test.txt' entry = gdal.GetNextDirEntry(d) assert entry is None gdal.CloseDir(d) # Depth = 0 handler = webserver.SequentialHandler() handler.add('GET', '/vsis3_opendir/?delimiter=%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix/> <Marker/> <Contents> <Key>test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> <CommonPrefixes> <Prefix>subdir/</Prefix> </CommonPrefixes> 
</ListBucketResult> """) with webserver.install_http_handler(handler): d = gdal.OpenDir('/vsis3/vsis3_opendir', 0) assert d is not None entry = gdal.GetNextDirEntry(d) assert entry.name == 'test.txt' assert entry.size == 40 assert entry.mode == 32768 assert entry.mtime == 1 entry = gdal.GetNextDirEntry(d) assert entry.name == 'subdir' assert entry.mode == 16384 entry = gdal.GetNextDirEntry(d) assert entry is None gdal.CloseDir(d) # Depth = 1 handler = webserver.SequentialHandler() handler.add('GET', '/vsis3_opendir/?delimiter=%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix/> <Marker/> <Contents> <Key>test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> <CommonPrefixes> <Prefix>subdir/</Prefix> </CommonPrefixes> </ListBucketResult> """) with webserver.install_http_handler(handler): d = gdal.OpenDir('/vsis3/vsis3_opendir', 1) assert d is not None entry = gdal.GetNextDirEntry(d) assert entry.name == 'test.txt' assert entry.size == 40 assert entry.mode == 32768 assert entry.mtime == 1 entry = gdal.GetNextDirEntry(d) assert entry.name == 'subdir' assert entry.mode == 16384 handler = webserver.SequentialHandler() handler.add('GET', '/vsis3_opendir/?delimiter=%2F&prefix=subdir%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>subdir/</Prefix> <Marker/> <Contents> <Key>subdir/test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>5</Size> </Contents> </ListBucketResult> """) with webserver.install_http_handler(handler): entry = gdal.GetNextDirEntry(d) assert entry.name == 'subdir/test.txt' entry = gdal.GetNextDirEntry(d) assert entry is None gdal.CloseDir(d) ############################################################################### # Test simple PUT support with a fake AWS server def test_vsis3_4(): if gdaltest.webserver_port == 0: pytest.skip() with webserver.install_http_handler(webserver.SequentialHandler()): with gdaltest.error_handler(): f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3', 'wb') assert f is None handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket3/empty_file.bin', 200, {'Connection': 'close'}, 'foo') with webserver.install_http_handler(handler): assert gdal.VSIStatL('/vsis3/s3_fake_bucket3/empty_file.bin').size == 3 # Empty file handler = webserver.SequentialHandler() def method(request): if request.headers['Content-Length'] != '0': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) return request.send_response(200) request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/s3_fake_bucket3/empty_file.bin', custom_method=method) with webserver.install_http_handler(handler): f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/empty_file.bin', 'wb') assert f is not None gdal.ErrorReset() gdal.VSIFCloseL(f) assert gdal.GetLastErrorMsg() == '' handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket3/empty_file.bin', 200, {'Connection': 'close'}, '') with webserver.install_http_handler(handler): assert gdal.VSIStatL('/vsis3/s3_fake_bucket3/empty_file.bin').size == 0 # Invalid seek handler = webserver.SequentialHandler() with webserver.install_http_handler(handler): f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/empty_file.bin', 'wb') assert f is not None with gdaltest.error_handler(): ret = gdal.VSIFSeekL(f, 1, 0) assert ret != 0 gdal.VSIFCloseL(f) # Invalid read handler = 
webserver.SequentialHandler() with webserver.install_http_handler(handler): f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/empty_file.bin', 'wb') assert f is not None with gdaltest.error_handler(): ret = gdal.VSIFReadL(1, 1, f) assert not ret gdal.VSIFCloseL(f) # Error case handler = webserver.SequentialHandler() handler.add('PUT', '/s3_fake_bucket3/empty_file_error.bin', 403) with webserver.install_http_handler(handler): f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/empty_file_error.bin', 'wb') assert f is not None gdal.ErrorReset() with gdaltest.error_handler(): gdal.VSIFCloseL(f) assert gdal.GetLastErrorMsg() != '' # Nominal case gdal.NetworkStatsReset() with gdaltest.config_option('CPL_VSIL_NETWORK_STATS_ENABLED', 'YES'): with webserver.install_http_handler(webserver.SequentialHandler()): f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/another_file.bin', 'wb') assert f is not None assert gdal.VSIFSeekL(f, gdal.VSIFTellL(f), 0) == 0 assert gdal.VSIFSeekL(f, 0, 1) == 0 assert gdal.VSIFSeekL(f, 0, 2) == 0 assert gdal.VSIFWriteL('foo', 1, 3, f) == 3 assert gdal.VSIFSeekL(f, gdal.VSIFTellL(f), 0) == 0 assert gdal.VSIFWriteL('bar', 1, 3, f) == 3 handler = webserver.SequentialHandler() def method(request): if request.headers['Content-Length'] != '6': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.wfile.write('HTTP/1.1 100 Continue\r\n\r\n'.encode('ascii')) content = request.rfile.read(6).decode('ascii') if content != 'foobar': sys.stderr.write('Did not get expected content: %s\n' % content) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/s3_fake_bucket3/another_file.bin', custom_method=method) gdal.ErrorReset() with webserver.install_http_handler(handler): gdal.VSIFCloseL(f) assert gdal.GetLastErrorMsg() == '' j = json.loads(gdal.NetworkStatsGetAsSerializedJSON()) #print(j) assert j == { "methods": { "PUT": { "count": 1, "uploaded_bytes": 6 } }, "handlers": { "vsis3": { "files": { "/vsis3/s3_fake_bucket3/another_file.bin": { "methods": { "PUT": { "count": 1, "uploaded_bytes": 6 } }, "actions": { "Write": { "methods": { "PUT": { "count": 1, "uploaded_bytes": 6 } } } } } }, "methods": { "PUT": { "count": 1, "uploaded_bytes": 6 } } } } } gdal.NetworkStatsReset() # Redirect case with webserver.install_http_handler(webserver.SequentialHandler()): f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/redirect', 'wb') assert f is not None assert gdal.VSIFWriteL('foobar', 1, 6, f) == 6 handler = webserver.SequentialHandler() def method(request): request.protocol_version = 'HTTP/1.1' if request.headers['Authorization'].find('us-east-1') >= 0: request.send_response(400) response = '<?xml version="1.0" encoding="UTF-8"?><Error><Message>bla</Message><Code>AuthorizationHeaderMalformed</Code><Region>us-west-2</Region></Error>' response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) request.send_header('Content-type', 'application/xml') request.send_header('Transfer-Encoding', 'chunked') request.end_headers() request.wfile.write(response.encode('ascii')) elif request.headers['Authorization'].find('us-west-2') >= 0: if request.headers['Content-Length'] != '6': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return 
request.wfile.write('HTTP/1.1 100 Continue\r\n\r\n'.encode('ascii')) content = request.rfile.read(6).decode('ascii') if content != 'foobar': sys.stderr.write('Did not get expected content: %s\n' % content) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.send_header('Content-Length', 0) request.end_headers() else: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/s3_fake_bucket3/redirect', custom_method=method) handler.add('PUT', '/s3_fake_bucket3/redirect', custom_method=method) gdal.ErrorReset() with webserver.install_http_handler(handler): gdal.VSIFCloseL(f) assert gdal.GetLastErrorMsg() == '' ############################################################################### # Test simple PUT support with retry logic def test_vsis3_write_single_put_retry(): if gdaltest.webserver_port == 0: pytest.skip() with gdaltest.config_options({'GDAL_HTTP_MAX_RETRY': '2', 'GDAL_HTTP_RETRY_DELAY': '0.01'}): with webserver.install_http_handler(webserver.SequentialHandler()): f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/put_with_retry.bin', 'wb') assert f is not None assert gdal.VSIFWriteL('foo', 1, 3, f) == 3 handler = webserver.SequentialHandler() def method(request): if request.headers['Content-Length'] != '3': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.wfile.write('HTTP/1.1 100 Continue\r\n\r\n'.encode('ascii')) content = request.rfile.read(3).decode('ascii') if content != 'foo': sys.stderr.write('Did not get expected content: %s\n' % content) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/s3_fake_bucket3/put_with_retry.bin', 502) handler.add('PUT', '/s3_fake_bucket3/put_with_retry.bin', custom_method=method) with gdaltest.error_handler(): with webserver.install_http_handler(handler): gdal.VSIFCloseL(f) ############################################################################### # Test simple DELETE support with a fake AWS server def test_vsis3_5(): if gdaltest.webserver_port == 0: pytest.skip() with webserver.install_http_handler(webserver.SequentialHandler()): with gdaltest.error_handler(): ret = gdal.Unlink('/vsis3/foo') assert ret != 0 handler = webserver.SequentialHandler() handler.add('GET', '/s3_delete_bucket/delete_file', 200, {'Connection': 'close'}, 'foo') with webserver.install_http_handler(handler): assert gdal.VSIStatL('/vsis3/s3_delete_bucket/delete_file').size == 3 handler = webserver.SequentialHandler() with webserver.install_http_handler(handler): assert gdal.VSIStatL('/vsis3/s3_delete_bucket/delete_file').size == 3 handler = webserver.SequentialHandler() handler.add('DELETE', '/s3_delete_bucket/delete_file', 204) with webserver.install_http_handler(handler): ret = gdal.Unlink('/vsis3/s3_delete_bucket/delete_file') assert ret == 0 handler = webserver.SequentialHandler() handler.add('GET', '/s3_delete_bucket/delete_file', 404, {'Connection': 'close'}) handler.add('GET', '/s3_delete_bucket/?delimiter=%2F&max-keys=100&prefix=delete_file%2F', 404, {'Connection': 'close'}) with webserver.install_http_handler(handler): assert gdal.VSIStatL('/vsis3/s3_delete_bucket/delete_file') is None 
handler = webserver.SequentialHandler() handler.add('GET', '/s3_delete_bucket/delete_file_error', 200) handler.add('DELETE', '/s3_delete_bucket/delete_file_error', 403) with webserver.install_http_handler(handler): with gdaltest.error_handler(): ret = gdal.Unlink('/vsis3/s3_delete_bucket/delete_file_error') assert ret != 0 handler = webserver.SequentialHandler() handler.add('GET', '/s3_delete_bucket/redirect', 200) def method(request): request.protocol_version = 'HTTP/1.1' if request.headers['Authorization'].find('us-east-1') >= 0: request.send_response(400) response = '<?xml version="1.0" encoding="UTF-8"?><Error><Message>bla</Message><Code>AuthorizationHeaderMalformed</Code><Region>us-west-2</Region></Error>' response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) request.send_header('Content-type', 'application/xml') request.send_header('Transfer-Encoding', 'chunked') request.end_headers() request.wfile.write(response.encode('ascii')) elif request.headers['Authorization'].find('us-west-2') >= 0: request.send_response(204) request.send_header('Content-Length', 0) request.end_headers() else: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) request.send_header('Content-Length', 0) request.end_headers() handler.add('DELETE', '/s3_delete_bucket/redirect', custom_method=method) handler.add('DELETE', '/s3_delete_bucket/redirect', custom_method=method) with webserver.install_http_handler(handler): ret = gdal.Unlink('/vsis3/s3_delete_bucket/redirect') assert ret == 0 ############################################################################### # Test DeleteObjects with a fake AWS server def test_vsis3_unlink_batch(): if gdaltest.webserver_port == 0: pytest.skip() def method(request): if request.headers['Content-MD5'] != 'Ze0X4LdlTwCsT+WpNxD9FA==': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(403) return content = request.rfile.read(int(request.headers['Content-Length'])).decode('ascii') if content != """<?xml version="1.0" encoding="UTF-8"?> <Delete xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Object> <Key>foo</Key> </Object> <Object> <Key>bar/baz</Key> </Object> </Delete> """: sys.stderr.write('Did not get expected content: %s\n' % content) request.send_response(403) return request.protocol_version = 'HTTP/1.1' request.send_response(200) response = """<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Deleted><Key>foo</Key></Deleted><Deleted><Key>bar/baz</Key></Deleted></DeleteResult>""" request.send_header('Content-Length', len(response)) request.send_header('Connection', 'close') request.end_headers() request.wfile.write(response.encode('ascii')) handler = webserver.SequentialHandler() handler.add('POST', '/unlink_batch/?delete', custom_method=method) handler.add('POST', '/unlink_batch/?delete', 200, {}, """<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Deleted><Key>baw</Key></Deleted></DeleteResult>""") with gdaltest.config_option('CPL_VSIS3_UNLINK_BATCH_SIZE', '2'): with webserver.install_http_handler(handler): ret = gdal.UnlinkBatch(['/vsis3/unlink_batch/foo', '/vsis3/unlink_batch/bar/baz', '/vsis3/unlink_batch/baw']) assert ret handler = webserver.SequentialHandler() handler.add('POST', '/unlink_batch/?delete', 200, {}, """<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Failed><Key>foo</Key></Failed></DeleteResult>""") with webserver.install_http_handler(handler): ret = gdal.UnlinkBatch(['/vsis3/unlink_batch/foo']) assert not ret 
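# Side note: the Content-MD5 value checked by the DeleteObjects handler above is the base64-encoded MD5 digest of the XML payload, as S3 requires. The sketch below (not part of the original test suite) recomputes it; the payload text is reconstructed from the test above, so if the exact whitespace differs, the digest will differ as well.
def _sketch_delete_objects_content_md5():
    import base64
    import hashlib
    # Reconstructed DeleteObjects body; indentation of the <Key> lines is an assumption.
    body = (
        '<?xml version="1.0" encoding="UTF-8"?>\n'
        '<Delete xmlns="http://s3.amazonaws.com/doc/2006-03-01/">\n'
        '<Object>\n'
        '  <Key>foo</Key>\n'
        '</Object>\n'
        '<Object>\n'
        '  <Key>bar/baz</Key>\n'
        '</Object>\n'
        '</Delete>\n'
    )
    # Should match the 'Ze0X4LdlTwCsT+WpNxD9FA==' header expected by the handler
    # if the body is byte-for-byte identical to what /vsis3/ sends.
    return base64.b64encode(hashlib.md5(body.encode('utf-8')).digest()).decode('ascii')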
############################################################################### # Test RmdirRecursive() with a fake AWS server def test_vsis3_rmdir_recursive(): if gdaltest.webserver_port == 0: pytest.skip() handler = webserver.SequentialHandler() handler.add('GET', '/test_rmdir_recursive/?prefix=somedir%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>somedir/</Prefix> <Marker/> <Contents> <Key>somedir/test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> <Contents> <Key>somedir/subdir/</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>0</Size> </Contents> <Contents> <Key>somedir/subdir/test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>5</Size> </Contents> </ListBucketResult> """) def method(request): content = request.rfile.read(int(request.headers['Content-Length'])).decode('ascii') if content != """<?xml version="1.0" encoding="UTF-8"?> <Delete xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Object> <Key>somedir/test.txt</Key> </Object> <Object> <Key>somedir/subdir/</Key> </Object> </Delete> """: sys.stderr.write('Did not get expected content: %s\n' % content) request.send_response(403) return request.protocol_version = 'HTTP/1.1' request.send_response(200) response = """<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Deleted><Key>somedir/test.txt</Key></Deleted><Deleted><Key>somedir/subdir/</Key></Deleted></DeleteResult>""" request.send_header('Content-Length', len(response)) request.send_header('Connection', 'close') request.end_headers() request.wfile.write(response.encode('ascii')) handler.add('POST', '/test_rmdir_recursive/?delete', custom_method=method) def method(request): content = request.rfile.read(int(request.headers['Content-Length'])).decode('ascii') if content != """<?xml version="1.0" encoding="UTF-8"?> <Delete xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Object> <Key>somedir/subdir/test.txt</Key> </Object> <Object> <Key>somedir/</Key> </Object> </Delete> """: sys.stderr.write('Did not get expected content: %s\n' % content) request.send_response(403) return request.protocol_version = 'HTTP/1.1' request.send_response(200) response = """<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Deleted><Key>somedir/subdir/test.txt</Key></Deleted><Deleted><Key>somedir/</Key></Deleted></DeleteResult>""" request.send_header('Content-Length', len(response)) request.send_header('Connection', 'close') request.end_headers() request.wfile.write(response.encode('ascii')) handler.add('POST', '/test_rmdir_recursive/?delete', custom_method=method) with gdaltest.config_option('CPL_VSIS3_UNLINK_BATCH_SIZE', '2'): with webserver.install_http_handler(handler): assert gdal.RmdirRecursive('/vsis3/test_rmdir_recursive/somedir') == 0 ############################################################################### # Test multipart upload with a fake AWS server def test_vsis3_6(): if gdaltest.webserver_port == 0: pytest.skip() with gdaltest.config_option('VSIS3_CHUNK_SIZE', '1'): # 1 MB with webserver.install_http_handler(webserver.SequentialHandler()): f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket4/large_file.bin', 'wb') assert f is not None size = 1024 * 1024 + 1 big_buffer = 'a' * size handler = webserver.SequentialHandler() def method(request): request.protocol_version = 'HTTP/1.1' if request.headers['Authorization'].find('us-east-1') >= 0: request.send_response(400) response = '<?xml version="1.0" 
encoding="UTF-8"?><Error><Message>bla</Message><Code>AuthorizationHeaderMalformed</Code><Region>us-west-2</Region></Error>' response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response) request.send_header('Content-type', 'application/xml') request.send_header('Transfer-Encoding', 'chunked') request.end_headers() request.wfile.write(response.encode('ascii')) elif request.headers['Authorization'].find('us-west-2') >= 0: response = '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>' request.send_response(200) request.send_header('Content-type', 'application/xml') request.send_header('Content-Length', len(response)) request.end_headers() request.wfile.write(response.encode('ascii')) else: sys.stderr.write('Bad headers: %s\n' % str(request.headers)) request.send_response(403) request.send_header('Content-Length', 0) request.end_headers() handler.add('POST', '/s3_fake_bucket4/large_file.bin?uploads', custom_method=method) handler.add('POST', '/s3_fake_bucket4/large_file.bin?uploads', custom_method=method) def method(request): if request.headers['Content-Length'] != '1048576': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.send_header('ETag', '"first_etag"') request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/s3_fake_bucket4/large_file.bin?partNumber=1&uploadId=my_id', custom_method=method) with webserver.install_http_handler(handler): ret = gdal.VSIFWriteL(big_buffer, 1, size, f) assert ret == size handler = webserver.SequentialHandler() def method(request): if request.headers['Content-Length'] != '1': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) return request.send_response(200) request.send_header('ETag', '"second_etag"') request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/s3_fake_bucket4/large_file.bin?partNumber=2&uploadId=my_id', custom_method=method) def method(request): if request.headers['Content-Length'] != '186': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return content = request.rfile.read(186).decode('ascii') if content != """<CompleteMultipartUpload> <Part> <PartNumber>1</PartNumber><ETag>"first_etag"</ETag></Part> <Part> <PartNumber>2</PartNumber><ETag>"second_etag"</ETag></Part> </CompleteMultipartUpload> """: sys.stderr.write('Did not get expected content: %s\n' % content) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.send_header('Content-Length', 0) request.end_headers() handler.add('POST', '/s3_fake_bucket4/large_file.bin?uploadId=my_id', custom_method=method) gdal.ErrorReset() with webserver.install_http_handler(handler): gdal.VSIFCloseL(f) assert gdal.GetLastErrorMsg() == '' handler = webserver.SequentialHandler() handler.add('POST', '/s3_fake_bucket4/large_file_initiate_403_error.bin?uploads', 403) handler.add('POST', '/s3_fake_bucket4/large_file_initiate_empty_result.bin?uploads', 200) handler.add('POST', '/s3_fake_bucket4/large_file_initiate_invalid_xml_result.bin?uploads', 200, {}, 'foo') handler.add('POST', '/s3_fake_bucket4/large_file_initiate_no_uploadId.bin?uploads', 200, {}, '<foo/>') with 
webserver.install_http_handler(handler): for filename in ['/vsis3/s3_fake_bucket4/large_file_initiate_403_error.bin', '/vsis3/s3_fake_bucket4/large_file_initiate_empty_result.bin', '/vsis3/s3_fake_bucket4/large_file_initiate_invalid_xml_result.bin', '/vsis3/s3_fake_bucket4/large_file_initiate_no_uploadId.bin']: with gdaltest.config_option('VSIS3_CHUNK_SIZE', '1'): # 1 MB f = gdal.VSIFOpenL(filename, 'wb') assert f is not None with gdaltest.error_handler(): ret = gdal.VSIFWriteL(big_buffer, 1, size, f) assert ret == 0 gdal.ErrorReset() gdal.VSIFCloseL(f) assert gdal.GetLastErrorMsg() == '' handler = webserver.SequentialHandler() handler.add('POST', '/s3_fake_bucket4/large_file_upload_part_403_error.bin?uploads', 200, {}, '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>') handler.add('PUT', '/s3_fake_bucket4/large_file_upload_part_403_error.bin?partNumber=1&uploadId=my_id', 403) handler.add('DELETE', '/s3_fake_bucket4/large_file_upload_part_403_error.bin?uploadId=my_id', 204) handler.add('POST', '/s3_fake_bucket4/large_file_upload_part_no_etag.bin?uploads', 200, {}, '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>') handler.add('PUT', '/s3_fake_bucket4/large_file_upload_part_no_etag.bin?partNumber=1&uploadId=my_id', 200) handler.add('DELETE', '/s3_fake_bucket4/large_file_upload_part_no_etag.bin?uploadId=my_id', 204) with webserver.install_http_handler(handler): for filename in ['/vsis3/s3_fake_bucket4/large_file_upload_part_403_error.bin', '/vsis3/s3_fake_bucket4/large_file_upload_part_no_etag.bin']: with gdaltest.config_option('VSIS3_CHUNK_SIZE', '1'): # 1 MB f = gdal.VSIFOpenL(filename, 'wb') assert f is not None, filename with gdaltest.error_handler(): ret = gdal.VSIFWriteL(big_buffer, 1, size, f) assert ret == 0, filename gdal.ErrorReset() gdal.VSIFCloseL(f) assert gdal.GetLastErrorMsg() == '', filename # Simulate failure in AbortMultipart stage handler = webserver.SequentialHandler() handler.add('POST', '/s3_fake_bucket4/large_file_abortmultipart_403_error.bin?uploads', 200, {}, '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>') handler.add('PUT', '/s3_fake_bucket4/large_file_abortmultipart_403_error.bin?partNumber=1&uploadId=my_id', 403) handler.add('DELETE', '/s3_fake_bucket4/large_file_abortmultipart_403_error.bin?uploadId=my_id', 403) filename = '/vsis3/s3_fake_bucket4/large_file_abortmultipart_403_error.bin' with webserver.install_http_handler(handler): with gdaltest.config_option('VSIS3_CHUNK_SIZE', '1'): # 1 MB f = gdal.VSIFOpenL(filename, 'wb') assert f is not None, filename with gdaltest.error_handler(): ret = gdal.VSIFWriteL(big_buffer, 1, size, f) assert ret == 0, filename gdal.ErrorReset() with gdaltest.error_handler(): gdal.VSIFCloseL(f) assert gdal.GetLastErrorMsg() != '', filename # Simulate failure in CompleteMultipartUpload stage handler = webserver.SequentialHandler() handler.add('POST', '/s3_fake_bucket4/large_file_completemultipart_403_error.bin?uploads', 200, {}, '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>') handler.add('PUT', '/s3_fake_bucket4/large_file_completemultipart_403_error.bin?partNumber=1&uploadId=my_id', 200, {'ETag': 'first_etag'}, '') handler.add('PUT', 
'/s3_fake_bucket4/large_file_completemultipart_403_error.bin?partNumber=2&uploadId=my_id', 200, {'ETag': 'second_etag'}, '') handler.add('POST', '/s3_fake_bucket4/large_file_completemultipart_403_error.bin?uploadId=my_id', 403) # handler.add('DELETE', '/s3_fake_bucket4/large_file_completemultipart_403_error.bin?uploadId=my_id', 204) filename = '/vsis3/s3_fake_bucket4/large_file_completemultipart_403_error.bin' with webserver.install_http_handler(handler): with gdaltest.config_option('VSIS3_CHUNK_SIZE', '1'): # 1 MB f = gdal.VSIFOpenL(filename, 'wb') assert f is not None, filename ret = gdal.VSIFWriteL(big_buffer, 1, size, f) assert ret == size, filename gdal.ErrorReset() with gdaltest.error_handler(): gdal.VSIFCloseL(f) assert gdal.GetLastErrorMsg() != '', filename ############################################################################### # Test multipart upload with retry logic def test_vsis3_write_multipart_retry(): if gdaltest.webserver_port == 0: pytest.skip() with gdaltest.config_options({'GDAL_HTTP_MAX_RETRY': '2', 'GDAL_HTTP_RETRY_DELAY': '0.01'}): with gdaltest.config_option('VSIS3_CHUNK_SIZE', '1'): # 1 MB with webserver.install_http_handler(webserver.SequentialHandler()): f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket4/large_file.bin', 'wb') assert f is not None size = 1024 * 1024 + 1 big_buffer = 'a' * size handler = webserver.SequentialHandler() response = '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>' handler.add('POST', '/s3_fake_bucket4/large_file.bin?uploads', 502) handler.add('POST', '/s3_fake_bucket4/large_file.bin?uploads', 200, {'Content-type': 'application/xml', 'Content-Length': len(response), 'Connection': 'close'}, response) handler.add('PUT', '/s3_fake_bucket4/large_file.bin?partNumber=1&uploadId=my_id', 502) handler.add('PUT', '/s3_fake_bucket4/large_file.bin?partNumber=1&uploadId=my_id', 200, {'Content-Length': '0', 'ETag': '"first_etag"', 'Connection': 'close'}, {}) with gdaltest.error_handler(): with webserver.install_http_handler(handler): ret = gdal.VSIFWriteL(big_buffer, 1, size, f) assert ret == size handler = webserver.SequentialHandler() handler.add('PUT', '/s3_fake_bucket4/large_file.bin?partNumber=2&uploadId=my_id', 200, {'Content-Length': '0', 'ETag': '"second_etag"', 'Connection': 'close'}, {}) handler.add('POST', '/s3_fake_bucket4/large_file.bin?uploadId=my_id', 502) handler.add('POST', '/s3_fake_bucket4/large_file.bin?uploadId=my_id', 200, {'Content-Length': '0', 'Connection': 'close'}, {}) with gdaltest.error_handler(): with webserver.install_http_handler(handler): gdal.VSIFCloseL(f) ############################################################################### # Test Mkdir() / Rmdir() def test_vsis3_7(): if gdaltest.webserver_port == 0: pytest.skip() handler = webserver.SequentialHandler() handler.add('GET', '/s3_bucket_test_mkdir/dir/', 404, {'Connection': 'close'}) handler.add('GET', '/s3_bucket_test_mkdir/?delimiter=%2F&max-keys=100&prefix=dir%2F', 404, {'Connection': 'close'}) handler.add('PUT', '/s3_bucket_test_mkdir/dir/', 200) with webserver.install_http_handler(handler): ret = gdal.Mkdir('/vsis3/s3_bucket_test_mkdir/dir', 0) assert ret == 0 assert stat.S_ISDIR(gdal.VSIStatL('/vsis3/s3_bucket_test_mkdir/dir').mode) dir_content = gdal.ReadDir('/vsis3/s3_bucket_test_mkdir/dir') assert dir_content == ['.'] # Try creating already existing directory handler = webserver.SequentialHandler() handler.add('GET', '/s3_bucket_test_mkdir/dir/', 416, {'Connection': 
'close'}) with webserver.install_http_handler(handler): ret = gdal.Mkdir('/vsis3/s3_bucket_test_mkdir/dir', 0) assert ret != 0 handler = webserver.SequentialHandler() handler.add('DELETE', '/s3_bucket_test_mkdir/dir/', 204) with webserver.install_http_handler(handler): ret = gdal.Rmdir('/vsis3/s3_bucket_test_mkdir/dir') assert ret == 0 # Try deleting already deleted directory handler = webserver.SequentialHandler() handler.add('GET', '/s3_bucket_test_mkdir/dir/', 404) handler.add('GET', '/s3_bucket_test_mkdir/?delimiter=%2F&max-keys=100&prefix=dir%2F', 404, {'Connection': 'close'}) with webserver.install_http_handler(handler): ret = gdal.Rmdir('/vsis3/s3_bucket_test_mkdir/dir') assert ret != 0 # Try deleting non-empty directory handler = webserver.SequentialHandler() handler.add('GET', '/s3_bucket_test_mkdir/dir_nonempty/', 416) handler.add('GET', '/s3_bucket_test_mkdir/?delimiter=%2F&max-keys=100&prefix=dir_nonempty%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>dir_nonempty/</Prefix> <Contents> <Key>dir_nonempty/test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> </ListBucketResult> """) with webserver.install_http_handler(handler): ret = gdal.Rmdir('/vsis3/s3_bucket_test_mkdir/dir_nonempty') assert ret != 0 # Try stat'ing a directory not ending with slash handler = webserver.SequentialHandler() handler.add('GET', '/s3_bucket_test_dir_stat/test_dir_stat', 400) handler.add('GET', '/s3_bucket_test_dir_stat/?delimiter=%2F&max-keys=100&prefix=test_dir_stat%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>test_dir_stat/</Prefix> <Contents> <Key>test_dir_stat/test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> </ListBucketResult> """) with webserver.install_http_handler(handler): assert stat.S_ISDIR(gdal.VSIStatL('/vsis3/s3_bucket_test_dir_stat/test_dir_stat').mode) # Try ReadDi'ing a directory not ending with slash handler = webserver.SequentialHandler() handler.add('GET', '/s3_bucket_test_readdir/?delimiter=%2F&prefix=test_dirread%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>test_dirread/</Prefix> <Contents> <Key>test_dirread/test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> </ListBucketResult> """) with webserver.install_http_handler(handler): assert gdal.ReadDir('/vsis3/s3_bucket_test_readdir/test_dirread') is not None # Try stat'ing a directory ending with slash handler = webserver.SequentialHandler() handler.add('GET', '/s3_bucket_test_dir_stat_2/test_dir_stat/', 400) handler.add('GET', '/s3_bucket_test_dir_stat_2/?delimiter=%2F&max-keys=100&prefix=test_dir_stat%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>test_dir_stat/</Prefix> <Contents> <Key>test_dir_stat/test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> </ListBucketResult> """) with webserver.install_http_handler(handler): assert stat.S_ISDIR(gdal.VSIStatL('/vsis3/s3_bucket_test_dir_stat_2/test_dir_stat/').mode) # Try ReadDi'ing a directory ending with slash handler = webserver.SequentialHandler() handler.add('GET', '/s3_bucket_test_readdir2/?delimiter=%2F&prefix=test_dirread%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> 
<ListBucketResult> <Prefix>test_dirread/</Prefix> <Contents> <Key>test_dirread/test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> </ListBucketResult> """) with webserver.install_http_handler(handler): assert gdal.ReadDir('/vsis3/s3_bucket_test_readdir2/test_dirread') is not None ############################################################################### # Test handling of file and directory with same name def test_vsis3_8(): if gdaltest.webserver_port == 0: pytest.skip() handler = webserver.SequentialHandler() handler.add('GET', '/vsis3_8/?delimiter=%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix></Prefix> <Contents> <Key>test</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>40</Size> </Contents> <CommonPrefixes> <Prefix>test/</Prefix> </CommonPrefixes> </ListBucketResult> """) with webserver.install_http_handler(handler): listdir = gdal.ReadDir('/vsis3/vsis3_8', 0) assert listdir == ['test', 'test/'] handler = webserver.SequentialHandler() with webserver.install_http_handler(handler): assert not stat.S_ISDIR(gdal.VSIStatL('/vsis3/vsis3_8/test').mode) handler = webserver.SequentialHandler() with webserver.install_http_handler(handler): assert stat.S_ISDIR(gdal.VSIStatL('/vsis3/vsis3_8/test/').mode) ############################################################################### # Test vsisync() with SYNC_STRATEGY=ETAG def test_vsis3_sync_etag(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() options = ['SYNC_STRATEGY=ETAG'] with gdaltest.error_handler(): handler = webserver.SequentialHandler() with webserver.install_http_handler(handler): assert not gdal.Sync('/i_do/not/exist', '/vsis3/', options=options) with gdaltest.error_handler(): handler = webserver.SequentialHandler() handler.add('GET', '/do_not/exist', 404) handler.add('GET', '/do_not/?delimiter=%2F&max-keys=100&prefix=exist%2F', 404) handler.add('PUT', '/do_not/exist', 404) with webserver.install_http_handler(handler): assert not gdal.Sync('vsifile.py', '/vsis3/do_not/exist', options=options) handler = webserver.SequentialHandler() handler.add('GET', '/out/', 200) handler.add('GET', '/out/testsync.txt', 404) handler.add('GET', '/out/?delimiter=%2F&max-keys=100&prefix=testsync.txt%2F', 404) def method(request): if request.headers['Content-Length'] != '3': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.wfile.write('HTTP/1.1 100 Continue\r\n\r\n'.encode('ascii')) content = request.rfile.read(3).decode('ascii') if content != 'foo': sys.stderr.write('Did not get expected content: %s\n' % content) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.send_header('Content-Length', 0) request.send_header('ETag', '"acbd18db4cc2f85cedef654fccc4a4d8"') request.end_headers() handler.add('PUT', '/out/testsync.txt', custom_method=method) gdal.FileFromMemBuffer('/vsimem/testsync.txt', 'foo') def cbk(pct, _, tab): assert pct > tab[0] tab[0] = pct return True tab = [ 0 ] with webserver.install_http_handler(handler): assert gdal.Sync('/vsimem/testsync.txt', '/vsis3/out', options=options, callback=cbk, callback_data=tab) assert tab[0] == 1.0 # Re-try with cached ETag. 
Should generate no network access handler = webserver.SequentialHandler() with webserver.install_http_handler(handler): assert gdal.Sync('/vsimem/testsync.txt', '/vsis3/out', options=options) assert gdal.Sync('/vsimem/testsync.txt', '/vsis3/out/testsync.txt', options=options) gdal.VSICurlClearCache() # Other direction: S3 to /vsimem handler = webserver.SequentialHandler() handler.add('GET', '/out/testsync.txt', 206, { 'Content-Length' : '3', 'Content-Range': 'bytes 0-2/3', 'ETag' : '"acbd18db4cc2f85cedef654fccc4a4d8"' }, "foo") with webserver.install_http_handler(handler): assert gdal.Sync( '/vsis3/out/testsync.txt', '/vsimem/', options=options) # Shouldn't do any copy, but hard to verify with webserver.install_http_handler(webserver.SequentialHandler()): assert gdal.Sync( '/vsis3/out/testsync.txt', '/vsimem/', options=options) assert gdal.Sync( '/vsis3/out/testsync.txt', '/vsimem/testsync.txt', options=options) # Modify target file, and redo synchronization gdal.FileFromMemBuffer('/vsimem/testsync.txt', 'bar') handler = webserver.SequentialHandler() handler.add('GET', '/out/testsync.txt', 200, { 'Content-Length' : '3', 'ETag' : '"acbd18db4cc2f85cedef654fccc4a4d8"' }, "foo") with webserver.install_http_handler(handler): assert gdal.Sync( '/vsis3/out/testsync.txt', '/vsimem/', options=options) f = gdal.VSIFOpenL('/vsimem/testsync.txt', 'rb') data = gdal.VSIFReadL(1, 3, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' # /vsimem to S3, but after cleaning the cache gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/out/', 200) handler.add('GET', '/out/testsync.txt', 206, { 'Content-Length' : '3', 'Content-Range': 'bytes 0-2/3', 'ETag' : '"acbd18db4cc2f85cedef654fccc4a4d8"' }, "foo") with webserver.install_http_handler(handler): assert gdal.Sync('/vsimem/testsync.txt', '/vsis3/out', options=options) gdal.Unlink('/vsimem/testsync.txt') # Directory copying gdal.VSICurlClearCache() gdal.Mkdir('/vsimem/subdir', 0) gdal.FileFromMemBuffer('/vsimem/subdir/testsync.txt', 'foo') handler = webserver.SequentialHandler() handler.add('GET', '/out/', 200, {}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix/> <Marker/> <IsTruncated>false</IsTruncated> <Contents> <Key>testsync.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>3</Size> <ETag>"acbd18db4cc2f85cedef654fccc4a4d8"</ETag> </Contents> </ListBucketResult> """) with webserver.install_http_handler(handler): assert gdal.Sync('/vsimem/subdir/', '/vsis3/out', options=options) gdal.RmdirRecursive('/vsimem/subdir') ############################################################################### # Test vsisync() with SYNC_STRATEGY=TIMESTAMP def test_vsis3_sync_timestamp(): if gdaltest.webserver_port == 0: pytest.skip() options = ['SYNC_STRATEGY=TIMESTAMP'] gdal.FileFromMemBuffer('/vsimem/testsync.txt', 'foo') # S3 to local: S3 file is older -> download gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/out/testsync.txt', 206, { 'Content-Length' : '3', 'Content-Range': 'bytes 0-2/3', 'Last-Modified': 'Mon, 01 Jan 1970 00:00:01 GMT' }, "foo") handler.add('GET', '/out/testsync.txt', 200, { 'Content-Length' : '3', 'Last-Modified': 'Mon, 01 Jan 1970 00:00:01 GMT' }, "foo") with webserver.install_http_handler(handler): assert gdal.Sync( '/vsis3/out/testsync.txt', '/vsimem/', options=options) # S3 to local: S3 file is newer -> do nothing gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/out/testsync.txt', 206, { 
'Content-Length' : '3', 'Content-Range': 'bytes 0-2/3', 'Last-Modified': 'Mon, 01 Jan 2037 00:00:01 GMT' }, "foo") with webserver.install_http_handler(handler): assert gdal.Sync( '/vsis3/out/testsync.txt', '/vsimem/', options=options) # Local to S3: S3 file is older -> upload gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/out/testsync.txt', 206, { 'Content-Length' : '3', 'Content-Range': 'bytes 0-2/3', 'Last-Modified': 'Mon, 01 Jan 1970 00:00:01 GMT' }, "foo") handler.add('PUT', '/out/testsync.txt', 200) with webserver.install_http_handler(handler): assert gdal.Sync( '/vsimem/testsync.txt', '/vsis3/out/testsync.txt', options=options) # Local to S3: S3 file is newer -> do nothing gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/out/testsync.txt', 206, { 'Content-Length' : '3', 'Content-Range': 'bytes 0-2/3', 'Last-Modified': 'Mon, 01 Jan 2037 00:00:01 GMT' }, "foo") with webserver.install_http_handler(handler): assert gdal.Sync( '/vsimem/testsync.txt', '/vsis3/out/testsync.txt', options=options) gdal.Unlink('/vsimem/testsync.txt') ############################################################################### # Test vsisync() with SYNC_STRATEGY=OVERWRITE def test_vsis3_sync_overwrite(): if gdaltest.webserver_port == 0: pytest.skip() options = ['SYNC_STRATEGY=OVERWRITE'] gdal.FileFromMemBuffer('/vsimem/testsync.txt', 'foo') # S3 to local: S3 file is newer gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/out/testsync.txt', 206, { 'Content-Length' : '3', 'Content-Range': 'bytes 0-2/3', 'Last-Modified': 'Mon, 01 Jan 2037 00:00:01 GMT' }, "foo") handler.add('GET', '/out/testsync.txt', 200, { 'Content-Length' : '3', 'Last-Modified': 'Mon, 01 Jan 2037 00:00:01 GMT' }, "foo") with webserver.install_http_handler(handler): assert gdal.Sync( '/vsis3/out/testsync.txt', '/vsimem/', options=options) # Local to S3: S3 file is newer gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/out/testsync.txt', 206, { 'Content-Length' : '3', 'Content-Range': 'bytes 0-2/3', 'Last-Modified': 'Mon, 01 Jan 2037 00:00:01 GMT' }, "foo") handler.add('PUT', '/out/testsync.txt', 200) with webserver.install_http_handler(handler): assert gdal.Sync( '/vsimem/testsync.txt', '/vsis3/out/testsync.txt', options=options) gdal.Unlink('/vsimem/testsync.txt') ############################################################################### # Test vsisync() with source and target in /vsis3 def test_vsis3_sync_source_target_in_vsis3(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/in/testsync.txt', 200, { 'Content-Length' : '3', 'Content-Range': 'bytes 0-2/3', 'Last-Modified': 'Mon, 01 Jan 1970 00:00:01 GMT' }, "foo") handler.add('GET', '/out/', 200) handler.add('GET', '/out/testsync.txt', 200, { 'Content-Length' : '3', 'Last-Modified': 'Mon, 01 Jan 1970 00:00:01 GMT' }, "foo") def method(request): if request.headers['Content-Length'] != '0': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) return if request.headers['x-amz-copy-source'] != '/in/testsync.txt': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) return request.send_response(200) request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/out/testsync.txt', custom_method=method) with 
webserver.install_http_handler(handler): assert gdal.Sync( '/vsis3/in/testsync.txt', '/vsis3/out/') ############################################################################### # Test rename def test_vsis3_fake_rename(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/test/source.txt', 206, { 'Content-Length' : '3', 'Content-Range': 'bytes 0-2/3' }, "foo") handler.add('GET', '/test/target.txt', 404) handler.add('GET', '/test/?delimiter=%2F&max-keys=100&prefix=target.txt%2F', 200) def method(request): if request.headers['Content-Length'] != '0': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) return if request.headers['x-amz-copy-source'] != '/test/source.txt': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) return request.send_response(200) request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/test/target.txt', custom_method=method) handler.add('DELETE', '/test/source.txt', 204) with webserver.install_http_handler(handler): assert gdal.Rename( '/vsis3/test/source.txt', '/vsis3/test/target.txt') == 0 ############################################################################### # Test rename def test_vsis3_fake_rename_dir(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/test/source_dir', 404) handler.add('GET', '/test/?delimiter=%2F&max-keys=100&prefix=source_dir%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix>source_dir/</Prefix> <Contents> <Key>source_dir/test.txt</Key> <LastModified>1970-01-01T00:00:01.000Z</LastModified> <Size>3</Size> </Contents> </ListBucketResult> """) handler.add('GET', '/test/target_dir/', 404) handler.add('GET', '/test/?delimiter=%2F&max-keys=100&prefix=target_dir%2F', 404) def method(request): if request.headers['Content-Length'] != '0': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.wfile.write('HTTP/1.1 100 Continue\r\n\r\n'.encode('ascii')) request.send_response(200) request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/test/target_dir/', custom_method=method) def method(request): if request.headers['Content-Length'] != '0': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) return if request.headers['x-amz-copy-source'] != '/test/source_dir/test.txt': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) return request.send_response(200) request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/test/target_dir/test.txt', custom_method=method) handler.add('DELETE', '/test/source_dir/test.txt', 204) handler.add('GET', '/test/source_dir/', 404) handler.add('GET', '/test/?delimiter=%2F&max-keys=100&prefix=source_dir%2F', 404) with webserver.install_http_handler(handler): assert gdal.Rename( '/vsis3/test/source_dir', '/vsis3/test/target_dir') == 0 ############################################################################### # Test rename onto existing dir is not allowed def test_vsis3_fake_rename_on_existing_dir(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() 
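# In the handler below the target already exists as a directory (the GET on /test_target_dir/ answers 200), so Rename() onto it is expected to be refused and to return -1.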
handler = webserver.SequentialHandler() handler.add('GET', '/test/source.txt', 206, { 'Content-Length' : '3', 'Content-Range': 'bytes 0-2/3' }, "foo") handler.add('GET', '/test_target_dir/', 200) with webserver.install_http_handler(handler): assert gdal.Rename( '/vsis3/test/source.txt', '/vsis3/test_target_dir') == -1 ############################################################################### # Test Sync() and multithreaded download and CHUNK_SIZE def test_vsis3_fake_sync_multithreaded_upload_chunk_size(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() def cbk(pct, _, tab): assert pct >= tab[0] tab[0] = pct return True gdal.Mkdir('/vsimem/test', 0) gdal.FileFromMemBuffer('/vsimem/test/foo', 'foo\n') tab = [ -1 ] handler = webserver.SequentialHandler() handler.add('GET', '/test_bucket/?prefix=test%2F', 200) handler.add('GET', '/test_bucket/test', 404) handler.add('GET', '/test_bucket/?delimiter=%2F&max-keys=100&prefix=test%2F', 200) handler.add('GET', '/test_bucket/', 200) handler.add('GET', '/test_bucket/test/', 404) handler.add('PUT', '/test_bucket/test/', 200) def method(request): request.protocol_version = 'HTTP/1.1' response = '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>' request.send_response(200) request.send_header('Content-type', 'application/xml') request.send_header('Content-Length', len(response)) request.end_headers() request.wfile.write(response.encode('ascii')) handler.add('POST', '/test_bucket/test/foo?uploads', custom_method=method) def method(request): if request.headers['Content-Length'] != '3': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.send_header('ETag', '"first_etag"') request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/test_bucket/test/foo?partNumber=1&uploadId=my_id', custom_method=method) def method(request): if request.headers['Content-Length'] != '1': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.send_header('ETag', '"second_etag"') request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/test_bucket/test/foo?partNumber=2&uploadId=my_id', custom_method=method) def method(request): if request.headers['Content-Length'] != '186': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return content = request.rfile.read(186).decode('ascii') if content != """<CompleteMultipartUpload> <Part> <PartNumber>1</PartNumber><ETag>"first_etag"</ETag></Part> <Part> <PartNumber>2</PartNumber><ETag>"second_etag"</ETag></Part> </CompleteMultipartUpload> """: sys.stderr.write('Did not get expected content: %s\n' % content) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.send_header('Content-Length', 0) request.end_headers() handler.add('POST', '/test_bucket/test/foo?uploadId=my_id', custom_method=method) with gdaltest.config_option('VSIS3_SIMULATE_THREADING', 'YES'): with webserver.install_http_handler(handler): assert gdal.Sync('/vsimem/test', '/vsis3/test_bucket', options=['NUM_THREADS=1', 
'CHUNK_SIZE=3'], callback=cbk, callback_data=tab) assert tab[0] == 1.0 gdal.RmdirRecursive('/vsimem/test') def test_vsis3_fake_sync_multithreaded_upload_chunk_size_failure(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() gdal.Mkdir('/vsimem/test', 0) gdal.FileFromMemBuffer('/vsimem/test/foo', 'foo\n') handler = webserver.SequentialHandler() handler.add('GET', '/test_bucket/?prefix=test%2F', 200) handler.add('GET', '/test_bucket/test', 404) handler.add('GET', '/test_bucket/?delimiter=%2F&max-keys=100&prefix=test%2F', 200) handler.add('GET', '/test_bucket/', 200) handler.add('GET', '/test_bucket/test/', 404) handler.add('PUT', '/test_bucket/test/', 200) def method(request): request.protocol_version = 'HTTP/1.1' response = '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>' request.send_response(200) request.send_header('Content-type', 'application/xml') request.send_header('Content-Length', len(response)) request.end_headers() request.wfile.write(response.encode('ascii')) handler.add('POST', '/test_bucket/test/foo?uploads', custom_method=method) def method(request): if request.headers['Content-Length'] != '3': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.send_header('ETag', '"first_etag"') request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/test_bucket/test/foo?partNumber=1&uploadId=my_id', 400) handler.add('DELETE', '/test_bucket/test/foo?uploadId=my_id', 204) with gdaltest.config_options({'VSIS3_SIMULATE_THREADING': 'YES', 'VSIS3_SYNC_MULTITHREADING': 'NO'}): with webserver.install_http_handler(handler): with gdaltest.error_handler(): assert not gdal.Sync('/vsimem/test', '/vsis3/test_bucket', options=['NUM_THREADS=1', 'CHUNK_SIZE=3']) gdal.RmdirRecursive('/vsimem/test') ############################################################################### # Test reading/writing metadata def test_vsis3_metadata(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() # Read HEADERS domain handler = webserver.SequentialHandler() handler.add('GET', '/test_metadata/foo.txt', 200, {'foo': 'bar'}) with webserver.install_http_handler(handler): md = gdal.GetFileMetadata('/vsis3/test_metadata/foo.txt', 'HEADERS') assert 'foo' in md and md['foo'] == 'bar' # Read TAGS domain handler = webserver.SequentialHandler() handler.add('GET', '/test_metadata/foo.txt?tagging', 200, {}, """<Tagging><TagSet><Tag><Key>foo</Key><Value>bar</Value></Tag></TagSet></Tagging>""") with webserver.install_http_handler(handler): md = gdal.GetFileMetadata('/vsis3/test_metadata/foo.txt', 'TAGS') assert 'foo' in md and md['foo'] == 'bar' # Write HEADERS domain handler = webserver.SequentialHandler() def method(request): if request.headers['foo'] != 'bar': sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers)) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.end_headers() handler.add('PUT', '/test_metadata/foo.txt', custom_method=method) with webserver.install_http_handler(handler): assert gdal.SetFileMetadata('/vsis3/test_metadata/foo.txt', {'foo': 'bar'}, 'HEADERS') # Write TAGS domain handler = webserver.SequentialHandler() def method(request): request.wfile.write('HTTP/1.1 100 Continue\r\n\r\n'.encode('ascii')) 
content = request.rfile.read(int(request.headers['Content-Length'])).decode('ascii') if content != """<?xml version="1.0" encoding="UTF-8"?> <Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <TagSet> <Tag> <Key>foo</Key> <Value>bar</Value> </Tag> </TagSet> </Tagging> """: sys.stderr.write('Did not get expected content: %s\n' % content) request.send_response(400) request.send_header('Content-Length', 0) request.end_headers() return request.send_response(200) request.send_header('Content-Length', 0) request.end_headers() handler.add('PUT', '/test_metadata/foo.txt?tagging', custom_method=method) with webserver.install_http_handler(handler): assert gdal.SetFileMetadata('/vsis3/test_metadata/foo.txt', {'foo': 'bar'}, 'TAGS') # Write TAGS domain (wiping tags) handler = webserver.SequentialHandler() handler.add('DELETE', '/test_metadata/foo.txt?tagging', 204) with webserver.install_http_handler(handler): assert gdal.SetFileMetadata('/vsis3/test_metadata/foo.txt', {}, 'TAGS') # Error case with gdaltest.error_handler(): assert gdal.GetFileMetadata('/vsis3/test_metadata/foo.txt', 'UNSUPPORTED') == {} # Error case with gdaltest.error_handler(): assert not gdal.SetFileMetadata('/vsis3/test_metadata/foo.txt', {}, 'UNSUPPORTED') ############################################################################### # Test that we take into account directory listing to avoid useless # requests def test_vsis3_no_useless_requests(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/no_useless_requests/?delimiter=%2F', 200, {'Content-type': 'application/xml'}, """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult> <Prefix></Prefix> <Contents> </Contents> </ListBucketResult> """) with webserver.install_http_handler(handler): assert gdal.VSIFOpenL('/vsis3/no_useless_requests/foo.txt', 'rb') is None assert gdal.VSIFOpenL('/vsis3/no_useless_requests/bar.txt', 'rb') is None assert gdal.VSIStatL('/vsis3/no_useless_requests/baz.txt') is None ############################################################################### # Test w+ access def test_vsis3_random_write(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() with gdaltest.error_handler(): assert gdal.VSIFOpenL('/vsis3/random_write/test.bin', 'w+b') is None with gdaltest.config_option('CPL_VSIL_USE_TEMP_FILE_FOR_RANDOM_WRITE', 'YES'): f = gdal.VSIFOpenL('/vsis3/random_write/test.bin', 'w+b') assert f assert gdal.VSIFWriteL('foo', 3, 1, f) == 1 assert gdal.VSIFSeekL(f, 0, 0) == 0 assert gdal.VSIFReadL(3, 1, f).decode('ascii') == 'foo' assert gdal.VSIFEofL(f) == 0 assert gdal.VSIFTellL(f) == 3 handler = webserver.SequentialHandler() handler.add('PUT', '/random_write/test.bin', 200, {}, expected_body=b'foo') with webserver.install_http_handler(handler): assert gdal.VSIFCloseL(f) == 0 ############################################################################### # Test w+ access def test_vsis3_random_write_failure_1(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() with gdaltest.config_option('CPL_VSIL_USE_TEMP_FILE_FOR_RANDOM_WRITE', 'YES'): f = gdal.VSIFOpenL('/vsis3/random_write/test.bin', 'w+b') assert f handler = webserver.SequentialHandler() handler.add('PUT', '/random_write/test.bin', 400, {}) with webserver.install_http_handler(handler): with gdaltest.error_handler(): assert gdal.VSIFCloseL(f) != 0 ############################################################################### # Test w+ access def 
test_vsis3_random_write_failure_2(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() with gdaltest.config_option('CPL_VSIL_USE_TEMP_FILE_FOR_RANDOM_WRITE', 'YES'): with gdaltest.config_option('VSIS3_CHUNK_SIZE_BYTES', '1'): f = gdal.VSIFOpenL('/vsis3/random_write/test.bin', 'w+b') assert f assert gdal.VSIFWriteL('foo', 3, 1, f) == 1 handler = webserver.SequentialHandler() handler.add('POST', '/random_write/test.bin?uploads', 400, {}) with webserver.install_http_handler(handler): with gdaltest.error_handler(): assert gdal.VSIFCloseL(f) != 0 ############################################################################### # Test w+ access def test_vsis3_random_write_gtiff_create_copy(): if gdaltest.webserver_port == 0: pytest.skip() gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('GET', '/random_write/test.tif', 404, {}) handler.add('GET', '/random_write/?delimiter=%2F&max-keys=100&prefix=test.tif%2F', 404, {}) handler.add('GET', '/random_write/?delimiter=%2F', 404, {}) src_ds = gdal.Open('data/byte.tif') with gdaltest.config_option('CPL_VSIL_USE_TEMP_FILE_FOR_RANDOM_WRITE', 'YES'): with webserver.install_http_handler(handler): ds = gdal.GetDriverByName('GTiff').CreateCopy('/vsis3/random_write/test.tif', src_ds) assert ds is not None handler = webserver.SequentialHandler() handler.add('PUT', '/random_write/test.tif', 200, {}) with webserver.install_http_handler(handler): ds = None ############################################################################### # Read credentials from simulated ~/.aws/credentials def test_vsis3_read_credentials_file(): if gdaltest.webserver_port == 0: pytest.skip() gdal.SetConfigOption('AWS_SECRET_ACCESS_KEY', '') gdal.SetConfigOption('AWS_ACCESS_KEY_ID', '') gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', '/vsimem/aws_credentials') gdal.VSICurlClearCache() gdal.FileFromMemBuffer('/vsimem/aws_credentials', """ [unrelated] aws_access_key_id = foo aws_secret_access_key = bar [default] aws_access_key_id = AWS_ACCESS_KEY_ID aws_secret_access_key = AWS_SECRET_ACCESS_KEY [unrelated] aws_access_key_id = foo aws_secret_access_key = bar """) handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket/resource', custom_method=get_s3_fake_bucket_resource_method) with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_fake_bucket/resource') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', '') gdal.Unlink('/vsimem/aws_credentials') ############################################################################### # Read credentials from simulated ~/.aws/config def test_vsis3_read_config_file(): if gdaltest.webserver_port == 0: pytest.skip() gdal.SetConfigOption('AWS_SECRET_ACCESS_KEY', '') gdal.SetConfigOption('AWS_ACCESS_KEY_ID', '') gdal.SetConfigOption('AWS_CONFIG_FILE', '/vsimem/aws_config') gdal.VSICurlClearCache() gdal.FileFromMemBuffer('/vsimem/aws_config', """ [unrelated] aws_access_key_id = foo aws_secret_access_key = bar [default] aws_access_key_id = AWS_ACCESS_KEY_ID aws_secret_access_key = AWS_SECRET_ACCESS_KEY region = us-east-1 [unrelated] aws_access_key_id = foo aws_secret_access_key = bar """) handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket/resource', custom_method=get_s3_fake_bucket_resource_method) with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_fake_bucket/resource') assert f is not None data = gdal.VSIFReadL(1, 
4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' gdal.SetConfigOption('AWS_CONFIG_FILE', '') gdal.Unlink('/vsimem/aws_config') ############################################################################### # Read credentials from simulated ~/.aws/credentials and ~/.aws/config def test_vsis3_read_credentials_config_file(): if gdaltest.webserver_port == 0: pytest.skip() gdal.SetConfigOption('AWS_SECRET_ACCESS_KEY', '') gdal.SetConfigOption('AWS_ACCESS_KEY_ID', '') gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', '/vsimem/aws_credentials') gdal.SetConfigOption('AWS_CONFIG_FILE', '/vsimem/aws_config') gdal.VSICurlClearCache() gdal.FileFromMemBuffer('/vsimem/aws_credentials', """ [unrelated] aws_access_key_id = foo aws_secret_access_key = bar [default] aws_access_key_id = AWS_ACCESS_KEY_ID aws_secret_access_key = AWS_SECRET_ACCESS_KEY [unrelated] aws_access_key_id = foo aws_secret_access_key = bar """) gdal.FileFromMemBuffer('/vsimem/aws_config', """ [unrelated] aws_access_key_id = foo aws_secret_access_key = bar [default] aws_access_key_id = AWS_ACCESS_KEY_ID aws_secret_access_key = AWS_SECRET_ACCESS_KEY region = us-east-1 [unrelated] aws_access_key_id = foo aws_secret_access_key = bar """) handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket/resource', custom_method=get_s3_fake_bucket_resource_method) with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_fake_bucket/resource') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', '') gdal.Unlink('/vsimem/aws_credentials') gdal.SetConfigOption('AWS_CONFIG_FILE', '') gdal.Unlink('/vsimem/aws_config') ############################################################################### # Read credentials from simulated ~/.aws/credentials and ~/.aws/config with # a non default profile def test_vsis3_read_credentials_config_file_non_default_profile(tmpdir): if gdaltest.webserver_port == 0: pytest.skip() gdal.SetConfigOption('AWS_SECRET_ACCESS_KEY', '') gdal.SetConfigOption('AWS_ACCESS_KEY_ID', '') gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', None) gdal.SetConfigOption('AWS_CONFIG_FILE', None) gdal.SetConfigOption('AWS_PROFILE', 'myprofile') os_aws = tmpdir.mkdir(".aws") gdal.VSICurlClearCache() os_aws.join('credentials').write(""" [unrelated] aws_access_key_id = foo aws_secret_access_key = bar [myprofile] aws_access_key_id = AWS_ACCESS_KEY_ID aws_secret_access_key = AWS_SECRET_ACCESS_KEY [default] aws_access_key_id = foo aws_secret_access_key = bar """) os_aws.join('config').write(""" [unrelated] aws_access_key_id = foo aws_secret_access_key = bar [profile myprofile] region = us-east-1 [default] aws_access_key_id = foo aws_secret_access_key = bar """) handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket/resource', custom_method=get_s3_fake_bucket_resource_method) with webserver.install_http_handler(handler): with gdaltest.config_option( 'USERPROFILE' if sys.platform == 'win32' else 'HOME', str(tmpdir) ): f = open_for_read('/vsis3/s3_fake_bucket/resource') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' gdal.SetConfigOption('AWS_PROFILE', '') ############################################################################### # Read credentials from simulated ~/.aws/credentials and ~/.aws/config def test_vsis3_read_credentials_config_file_inconsistent(): if gdaltest.webserver_port == 0: pytest.skip() 
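# Both files define conflicting keys for the default profile. The credentials file is expected to take precedence, while GDAL emits a warning about the inconsistency; that is what the assertions below check.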
gdal.SetConfigOption('AWS_SECRET_ACCESS_KEY', '') gdal.SetConfigOption('AWS_ACCESS_KEY_ID', '') gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', '/vsimem/aws_credentials') gdal.SetConfigOption('AWS_CONFIG_FILE', '/vsimem/aws_config') gdal.VSICurlClearCache() gdal.FileFromMemBuffer('/vsimem/aws_credentials', """ [unrelated] aws_access_key_id = foo aws_secret_access_key = bar [default] aws_access_key_id = AWS_ACCESS_KEY_ID aws_secret_access_key = AWS_SECRET_ACCESS_KEY [unrelated] aws_access_key_id = foo aws_secret_access_key = bar """) gdal.FileFromMemBuffer('/vsimem/aws_config', """ [unrelated] aws_access_key_id = foo aws_secret_access_key = bar [default] aws_access_key_id = AWS_ACCESS_KEY_ID_inconsistent aws_secret_access_key = AWS_SECRET_ACCESS_KEY_inconsistent region = us-east-1 [unrelated] aws_access_key_id = foo aws_secret_access_key = bar """) gdal.ErrorReset() handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket/resource', custom_method=get_s3_fake_bucket_resource_method) with webserver.install_http_handler(handler): with gdaltest.error_handler(): f = open_for_read('/vsis3/s3_fake_bucket/resource') assert f is not None assert gdal.GetLastErrorMsg() != '' data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', '') gdal.Unlink('/vsimem/aws_credentials') gdal.SetConfigOption('AWS_CONFIG_FILE', '') gdal.Unlink('/vsimem/aws_config') ############################################################################### # Read credentials from simulated EC2 instance def test_vsis3_read_credentials_ec2_imdsv2(): if gdaltest.webserver_port == 0: pytest.skip() if sys.platform not in ('linux', 'linux2', 'win32'): pytest.skip() gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', '') gdal.SetConfigOption('AWS_CONFIG_FILE', '') gdal.SetConfigOption('AWS_SECRET_ACCESS_KEY', '') gdal.SetConfigOption('AWS_ACCESS_KEY_ID', '') gdal.SetConfigOption('CPL_AWS_EC2_API_ROOT_URL', 'http://localhost:%d' % gdaltest.webserver_port) # Disable hypervisor related check to test if we are really on EC2 gdal.SetConfigOption('CPL_AWS_AUTODETECT_EC2', 'NO') gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('PUT', '/latest/api/token', 200, {}, 'mytoken', expected_headers={'X-aws-ec2-metadata-token-ttl-seconds': '10'}) handler.add('GET', '/latest/meta-data/iam/security-credentials/', 200, {}, 'myprofile', expected_headers={'X-aws-ec2-metadata-token': 'mytoken'}) handler.add('GET', '/latest/meta-data/iam/security-credentials/myprofile', 200, {}, """{ "AccessKeyId": "AWS_ACCESS_KEY_ID", "SecretAccessKey": "AWS_SECRET_ACCESS_KEY", "Expiration": "3000-01-01T00:00:00Z" }""", expected_headers={'X-aws-ec2-metadata-token': 'mytoken'}) handler.add('GET', '/s3_fake_bucket/resource', custom_method=get_s3_fake_bucket_resource_method) with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_fake_bucket/resource') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' # Set a fake URL to check that credentials re-use works gdal.SetConfigOption('CPL_AWS_EC2_API_ROOT_URL', '') handler = webserver.SequentialHandler() handler.add('GET', '/s3_fake_bucket/bar', 200, {}, 'bar') with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_fake_bucket/bar') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'bar' gdal.SetConfigOption('CPL_AWS_EC2_API_ROOT_URL', '') 
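# The read of /s3_fake_bucket/bar above must not contact the metadata server again: the credentials obtained earlier are cached until their Expiration. Finally, restore the default hypervisor detection for the following tests.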
gdal.SetConfigOption('CPL_AWS_AUTODETECT_EC2', None) ############################################################################### # Read credentials from simulated EC2 instance that only supports IMDSv1 def test_vsis3_read_credentials_ec2_imdsv1(): if gdaltest.webserver_port == 0: pytest.skip() if sys.platform not in ('linux', 'linux2', 'win32'): pytest.skip() gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', '') gdal.SetConfigOption('AWS_CONFIG_FILE', '') gdal.SetConfigOption('AWS_SECRET_ACCESS_KEY', '') gdal.SetConfigOption('AWS_ACCESS_KEY_ID', '') gdal.SetConfigOption('CPL_AWS_EC2_API_ROOT_URL', 'http://localhost:%d' % gdaltest.webserver_port) # Disable hypervisor related check to test if we are really on EC2 gdal.SetConfigOption('CPL_AWS_AUTODETECT_EC2', 'NO') gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('PUT', '/latest/api/token', 403, {}, expected_headers={'X-aws-ec2-metadata-token-ttl-seconds': '10'}) handler.add('GET', '/latest/meta-data/iam/security-credentials/', 200, {}, 'myprofile', unexpected_headers=['X-aws-ec2-metadata-token']) handler.add('GET', '/latest/meta-data/iam/security-credentials/myprofile', 200, {}, """{ "AccessKeyId": "AWS_ACCESS_KEY_ID", "SecretAccessKey": "AWS_SECRET_ACCESS_KEY", "Expiration": "3000-01-01T00:00:00Z" }""", unexpected_headers=['X-aws-ec2-metadata-token']) handler.add('GET', '/s3_fake_bucket/resource', custom_method=get_s3_fake_bucket_resource_method) with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_fake_bucket/resource') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' gdal.SetConfigOption('CPL_AWS_EC2_API_ROOT_URL', '') gdal.SetConfigOption('CPL_AWS_AUTODETECT_EC2', None) ############################################################################### # Read credentials from simulated EC2 instance with expiration of the # cached credentials def test_vsis3_read_credentials_ec2_expiration(): if gdaltest.webserver_port == 0: pytest.skip() if sys.platform not in ('linux', 'linux2', 'win32'): pytest.skip() gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', '') gdal.SetConfigOption('AWS_CONFIG_FILE', '') gdal.SetConfigOption('AWS_SECRET_ACCESS_KEY', '') gdal.SetConfigOption('AWS_ACCESS_KEY_ID', '') gdal.SetConfigOption('CPL_AWS_EC2_API_ROOT_URL', 'http://localhost:%d' % gdaltest.webserver_port) # Disable hypervisor related check to test if we are really on EC2 gdal.SetConfigOption('CPL_AWS_AUTODETECT_EC2', 'NO') gdal.VSICurlClearCache() handler = webserver.SequentialHandler() handler.add('PUT', '/latest/api/token', 200, {}, 'mytoken', expected_headers={'X-aws-ec2-metadata-token-ttl-seconds': '10'}) handler.add('GET', '/latest/meta-data/iam/security-credentials/', 200, {}, 'myprofile', expected_headers={'X-aws-ec2-metadata-token': '<PASSWORD>'}) handler.add('GET', '/latest/meta-data/iam/security-credentials/myprofile', 200, {}, """{ "AccessKeyId": "AWS_ACCESS_KEY_ID", "SecretAccessKey": "AWS_SECRET_ACCESS_KEY", "Expiration": "1970-01-01T00:00:00Z" }""", expected_headers={'X-aws-ec2-metadata-token': '<PASSWORD>'}) handler.add('PUT', '/latest/api/token', 200, {}, '<PASSWORD>', expected_headers={'X-aws-ec2-metadata-token-ttl-seconds': '10'}) handler.add('GET', '/latest/meta-data/iam/security-credentials/myprofile', 200, {}, """{ "AccessKeyId": "AWS_ACCESS_KEY_ID", "SecretAccessKey": "AWS_SECRET_ACCESS_KEY", "Expiration": "1970-01-01T00:00:00Z" }""", expected_headers={'X-aws-ec2-metadata-token': '<PASSWORD>'}) handler.add('GET', 
'/s3_fake_bucket/resource', custom_method=get_s3_fake_bucket_resource_method) with webserver.install_http_handler(handler): f = open_for_read('/vsis3/s3_fake_bucket/resource') assert f is not None data = gdal.VSIFReadL(1, 4, f).decode('ascii') gdal.VSIFCloseL(f) assert data == 'foo' # Set a fake URL to demonstrate we try to re-fetch credentials gdal.SetConfigOption('CPL_AWS_EC2_API_ROOT_URL', 'http://localhost:%d/invalid' % gdaltest.webserver_port) handler = webserver.SequentialHandler() handler.add('PUT', '/invalid/latest/api/token', 404) handler.add('GET', '/invalid/latest/meta-data/iam/security-credentials/myprofile', 404) with webserver.install_http_handler(handler): with gdaltest.error_handler(): f = open_for_read('/vsis3/s3_fake_bucket/bar') assert f is None gdal.SetConfigOption('CPL_AWS_EC2_API_ROOT_URL', '') gdal.SetConfigOption('CPL_AWS_AUTODETECT_EC2', None) ############################################################################### def test_vsis3_stop_webserver(): if gdaltest.webserver_port == 0: pytest.skip() # Clearcache needed to close all connections, since the Python server # can only handle one connection at a time gdal.VSICurlClearCache() webserver.server_stop(gdaltest.webserver_process, gdaltest.webserver_port) ############################################################################### # Nominal cases (require valid credentials) def test_vsis3_extra_1(): if not gdaltest.built_against_curl(): pytest.skip() credentials_filename = gdal.GetConfigOption('HOME', gdal.GetConfigOption('USERPROFILE', '')) + '/.aws/credentials' # Either a bucket name or bucket/filename s3_resource = gdal.GetConfigOption('S3_RESOURCE') if not os.path.exists(credentials_filename): if gdal.GetConfigOption('AWS_SECRET_ACCESS_KEY') is None: pytest.skip('Missing AWS_SECRET_ACCESS_KEY') elif gdal.GetConfigOption('AWS_ACCESS_KEY_ID') is None: pytest.skip('Missing AWS_ACCESS_KEY_ID') if s3_resource is None: pytest.skip('Missing S3_RESOURCE') if '/' not in s3_resource: path = '/vsis3/' + s3_resource statres = gdal.VSIStatL(path) assert statres is not None and stat.S_ISDIR(statres.mode), \ ('%s is not a valid bucket' % path) readdir = gdal.ReadDir(path) assert readdir is not None, 'ReadDir() should not return empty list' for filename in readdir: if filename != '.': subpath = path + '/' + filename assert gdal.VSIStatL(subpath) is not None, \ ('Stat(%s) should not return an error' % subpath) unique_id = 'vsis3_test' subpath = path + '/' + unique_id ret = gdal.Mkdir(subpath, 0) assert ret >= 0, ('Mkdir(%s) should not return an error' % subpath) readdir = gdal.ReadDir(path) assert unique_id in readdir, \ ('ReadDir(%s) should contain %s' % (path, unique_id)) ret = gdal.Mkdir(subpath, 0) assert ret != 0, ('Mkdir(%s) repeated should return an error' % subpath) ret = gdal.Rmdir(subpath) assert ret >= 0, ('Rmdir(%s) should not return an error' % subpath) readdir = gdal.ReadDir(path) assert unique_id not in readdir, \ ('ReadDir(%s) should not contain %s' % (path, unique_id)) ret = gdal.Rmdir(subpath) assert ret != 0, ('Rmdir(%s) repeated should return an error' % subpath) ret = gdal.Mkdir(subpath, 0) assert ret >= 0, ('Mkdir(%s) should not return an error' % subpath) f = gdal.VSIFOpenL(subpath + '/test.txt', 'wb') assert f is not None gdal.VSIFWriteL('hello', 1, 5, f) gdal.VSIFCloseL(f) ret = gdal.Rmdir(subpath) assert ret != 0, \ ('Rmdir(%s) on non empty directory should return an error' % subpath) f = gdal.VSIFOpenL(subpath + '/test.txt', 'rb') assert f is not None data = gdal.VSIFReadL(1, 5, 
f).decode('utf-8') assert data == 'hello' gdal.VSIFCloseL(f) assert gdal.Rename(subpath + '/test.txt', subpath + '/test2.txt') == 0 f = gdal.VSIFOpenL(subpath + '/test2.txt', 'rb') assert f is not None data = gdal.VSIFReadL(1, 5, f).decode('utf-8') assert data == 'hello' gdal.VSIFCloseL(f) ret = gdal.Unlink(subpath + '/test2.txt') assert ret >= 0, \ ('Unlink(%s) should not return an error' % (subpath + '/test2.txt')) ret = gdal.Rmdir(subpath) assert ret >= 0, ('Rmdir(%s) should not return an error' % subpath) return f = open_for_read('/vsis3/' + s3_resource) assert f is not None, ('cannot open %s' % ('/vsis3/' + s3_resource)) ret = gdal.VSIFReadL(1, 1, f) gdal.VSIFCloseL(f) assert len(ret) == 1 # Same with /vsis3_streaming/ f = open_for_read('/vsis3_streaming/' + s3_resource) assert f is not None ret = gdal.VSIFReadL(1, 1, f) gdal.VSIFCloseL(f) assert len(ret) == 1 if False: # pylint: disable=using-constant-test # we actually try to read at read() time and bSetError = false # Invalid bucket : "The specified bucket does not exist" gdal.ErrorReset() f = open_for_read('/vsis3/not_existing_bucket/foo') with gdaltest.error_handler(): gdal.VSIFReadL(1, 1, f) gdal.VSIFCloseL(f) assert gdal.VSIGetLastErrorMsg() != '' # Invalid resource gdal.ErrorReset() f = open_for_read('/vsis3_streaming/' + gdal.GetConfigOption('S3_RESOURCE') + '/invalid_resource.baz') assert f is None, gdal.VSIGetLastErrorMsg() # Test GetSignedURL() signed_url = gdal.GetSignedURL('/vsis3/' + s3_resource) f = open_for_read('/vsicurl_streaming/' + signed_url) assert f is not None ret = gdal.VSIFReadL(1, 1, f) gdal.VSIFCloseL(f) assert len(ret) == 1 ############################################################################### def test_vsis3_cleanup(): for var in gdaltest.aws_vars: gdal.SetConfigOption(var, gdaltest.aws_vars[var]) gdal.SetConfigOption('CPL_AWS_CREDENTIALS_FILE', None) gdal.SetConfigOption('AWS_CONFIG_FILE', None) gdal.SetConfigOption('CPL_AWS_EC2_API_ROOT_URL', None)
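# Illustrative sketch (not part of the original test file): the IMDSv2 handshake that the
# simulated handlers above exercise, written against the real metadata endpoint with the
# `requests` library (an assumption; GDAL itself does this in C via libcurl). The header
# names and URL paths are the ones the test expects; the profile name is whatever the
# metadata service returns.
import requests

IMDS = 'http://169.254.169.254'

def fetch_ec2_credentials(timeout=2):
    # Step 1: obtain a session token (IMDSv2); a 403 or timeout would mean falling
    # back to IMDSv1, as the second test below simulates.
    token = requests.put(
        IMDS + '/latest/api/token',
        headers={'X-aws-ec2-metadata-token-ttl-seconds': '10'},
        timeout=timeout,
    ).text
    hdr = {'X-aws-ec2-metadata-token': token}
    # Step 2: discover the instance profile name.
    profile = requests.get(
        IMDS + '/latest/meta-data/iam/security-credentials/',
        headers=hdr, timeout=timeout,
    ).text.strip()
    # Step 3: fetch the temporary credentials (AccessKeyId, SecretAccessKey, Expiration).
    return requests.get(
        IMDS + '/latest/meta-data/iam/security-credentials/' + profile,
        headers=hdr, timeout=timeout,
    ).json()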
1.210938
1
day06/part1.py
bugra-yilmaz/adventofcode2021
0
130
import os.path
from collections import Counter

import pytest

INPUT_TXT = os.path.join(os.path.dirname(__file__), 'input.txt')


def compute(s: str) -> int:
    lines = s.splitlines()
    numbers = Counter(int(f) for f in lines[0].split(","))
    for d in range(80):
        numbers2 = Counter({8: numbers[0], 6: numbers[0]})
        for k, v in numbers.items():
            if k >= 1:
                numbers2[k - 1] += v
        numbers = numbers2
    return sum(numbers.values())


INPUT_S = '''\
3,4,3,1,2
'''
EXPECTED = 5934


@pytest.mark.parametrize(
    ('input_s', 'expected'),
    (
        (INPUT_S, EXPECTED),
    ),
)
def test(input_s: str, expected: int) -> None:
    assert compute(input_s) == expected


def main() -> int:
    with open(INPUT_TXT, "r") as f:
        print(compute(f.read()))
    return 0


if __name__ == '__main__':
    raise SystemExit(main())
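# Hypothetical helper (not in the original file): the same Counter recurrence as
# compute(), parameterised over the number of days so the 256-day variant can be
# answered with the identical bookkeeping.
def simulate(timers, days):
    counts = Counter(timers)
    for _ in range(days):
        spawning = counts[0]
        counts = Counter({k - 1: v for k, v in counts.items() if k >= 1})
        counts[6] += spawning  # parents reset to 6
        counts[8] += spawning  # newborns start at 8
    return sum(counts.values())

# e.g. simulate([3, 4, 3, 1, 2], 80) == 5934, matching EXPECTED above.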
3.140625
3
functional_tests.py
gustavomazevedo/tbackup-client
0
131
<filename>functional_tests.py
from selenium import webdriver

browser = webdriver.Firefox()
browser.get('http://localhost:8000')

assert 'Django' in browser.title
1.953125
2
examples/first_char_last_column.py
clarkfitzg/sta141c
24
132
<filename>examples/first_char_last_column.py
#!/usr/bin/env python3
"""
For the last column, print only the first character.

Usage:

$ printf "100,200\n0,\n" | python3 first_char_last_column.py

Should print "100,2\n0,"
"""

import csv
from sys import stdin, stdout


def main():
    reader = csv.reader(stdin)
    writer = csv.writer(stdout)
    for row in reader:
        try:
            row[-1] = row[-1][0]
        except IndexError:
            # Python: Better to ask forgiveness than permission
            # Alternative: Look before you leap
            pass
        writer.writerow(row)


if __name__ == "__main__":
    main()
3.609375
4
env_ci.py
reloadware/stickybeak
0
133
from pathlib import Path root = Path(__file__).parent.absolute() import envo envo.add_source_roots([root]) from pathlib import Path from typing import Any, Dict, List, Optional, Tuple from envo import Env, Namespace, env_var, logger, run from env_comm import StickybeakCommEnv as ParentEnv p = Namespace("p") class StickybeakCiEnv(ParentEnv): class Meta(ParentEnv.Meta): stage: str = "ci" emoji: str = "⚙" load_env_vars = True class Environ(ParentEnv.Environ): pypi_username: Optional[str] = env_var(raw=True) pypi_password: Optional[str] = env_var(raw=True) e: Environ def init(self) -> None: super().init() @p.command def bootstrap(self, test_apps=True) -> None: super().bootstrap(test_apps) @p.command def test(self) -> None: run("pytest --reruns 2 -v tests") @p.command def build(self) -> None: run("poetry build") @p.command def publish(self) -> None: run(f'poetry publish --username "{self.e.pypi_username}" --password "{self.e.pypi_password}"', verbose=False) @p.command def rstcheck(self) -> None: pass # run("rstcheck README.rst | tee ./workspace/rstcheck.txt") @p.command def flake(self) -> None: pass # run("flake8 . | tee ./workspace/flake8.txt") @p.command def check_black(self) -> None: run("black --check .") @p.command def check_isort(self) -> None: run("black --check .") @p.command def mypy(self) -> None: pass run("mypy .") @p.command def generate_version(self) -> None: import toml config = toml.load(str(self.meta.root / "pyproject.toml")) version: str = config["tool"]["poetry"]["version"] version_file = self.meta.root / "stickybeak/__version__.py" Path(version_file).touch() version_file.write_text(f'__version__ = "{version}"\n') ThisEnv = StickybeakCiEnv
2.234375
2
zmq_srv.py
iyedb/boost_asio_zeromq
4
134
<reponame>iyedb/boost_asio_zeromq
from __future__ import print_function

import zmq
import time

ADDR = 'tcp://127.0.0.1:11155'

ctx = zmq.Context()
srv = ctx.socket(zmq.REP)
srv.bind(ADDR)
# srv.setsockopt(zmq.RCVTIMEO, 3000);

while True:
    try:
        msg = srv.recv()
    except Exception as e:
        print('zmq socket recv timed out:', e)
    else:
        print('client says: %s' % msg)
        srv.send('hi from server')
    time.sleep(2)
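# Companion sketch (not in the original repo): a minimal REQ client for the REP
# server above, using the same address. Run it as a separate process while the
# server loop is running.
# import zmq
# ctx = zmq.Context()
# req = ctx.socket(zmq.REQ)
# req.connect('tcp://127.0.0.1:11155')
# req.send(b'hello from client')
# print(req.recv())  # -> b'hi from server'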
2.53125
3
mypy/server/aststrip.py
mmaryada27/mypy
0
135
<gh_stars>0 """Strip/reset AST in-place to match state after semantic analysis pass 1. Fine-grained incremental mode reruns semantic analysis (passes 2 and 3) and type checking for *existing* AST nodes (targets) when changes are propagated using fine-grained dependencies. AST nodes attributes are often changed during semantic analysis passes 2 and 3, and running semantic analysis again on those nodes would produce incorrect results, since these passes aren't idempotent. This pass resets AST nodes to reflect the state after semantic analysis pass 1, so that we can rerun semantic analysis. (The above is in contrast to behavior with modules that have source code changes, for which we reparse the entire module and reconstruct a fresh AST. No stripping is required in this case. Both modes of operation should have the same outcome.) Notes: * This is currently pretty fragile, as we must carefully undo whatever changes can be made in semantic analysis passes 2 and 3, including changes to symbol tables. * We reuse existing AST nodes because it makes it relatively straightforward to reprocess only a single target within a module efficiently. If there was a way to parse a single target within a file, in time proportional to the size of the target, we'd rather create fresh AST nodes than strip them. Alas, no such facility exists and building it is non-trivial. * Currently we don't actually reset all changes, but only those known to affect non-idempotent semantic analysis behavior. TODO: It would be more principled and less fragile to reset everything changed in semantic analysis pass 2 and later. * Reprocessing may recreate AST nodes (such as Var nodes, and TypeInfo nodes created with assignment statements) that will get different identities from the original AST. Thus running an AST merge is necessary after stripping, even though some identities are preserved. """ import contextlib from typing import Union, Iterator, Optional from mypy.nodes import ( Node, FuncDef, NameExpr, MemberExpr, RefExpr, MypyFile, FuncItem, ClassDef, AssignmentStmt, ImportFrom, Import, TypeInfo, SymbolTable, Var, CallExpr, Decorator, OverloadedFuncDef, SuperExpr, UNBOUND_IMPORTED, GDEF, MDEF, IndexExpr ) from mypy.traverser import TraverserVisitor def strip_target(node: Union[MypyFile, FuncItem, OverloadedFuncDef]) -> None: """Reset a fine-grained incremental target to state after semantic analysis pass 1. NOTE: Currently we opportunistically only reset changes that are known to otherwise cause trouble. """ visitor = NodeStripVisitor() if isinstance(node, MypyFile): visitor.strip_file_top_level(node) else: node.accept(visitor) class NodeStripVisitor(TraverserVisitor): def __init__(self) -> None: self.type = None # type: Optional[TypeInfo] self.names = None # type: Optional[SymbolTable] self.is_class_body = False # By default, process function definitions. If False, don't -- this is used for # processing module top levels. 
self.recurse_into_functions = True def strip_file_top_level(self, file_node: MypyFile) -> None: """Strip a module top-level (don't recursive into functions).""" self.names = file_node.names self.recurse_into_functions = False file_node.accept(self) def visit_class_def(self, node: ClassDef) -> None: """Strip class body and type info, but don't strip methods.""" node.info.type_vars = [] node.info.bases = [] node.info.abstract_attributes = [] node.info.mro = [] node.info.add_type_vars() node.info.tuple_type = None node.info.typeddict_type = None node.info._cache = set() node.info._cache_proper = set() node.base_type_exprs.extend(node.removed_base_type_exprs) node.removed_base_type_exprs = [] with self.enter_class(node.info): super().visit_class_def(node) def visit_func_def(self, node: FuncDef) -> None: if not self.recurse_into_functions: return node.expanded = [] node.type = node.unanalyzed_type with self.enter_method(node.info) if node.info else nothing(): super().visit_func_def(node) def visit_decorator(self, node: Decorator) -> None: node.var.type = None for expr in node.decorators: expr.accept(self) if self.recurse_into_functions: node.func.accept(self) def visit_overloaded_func_def(self, node: OverloadedFuncDef) -> None: if not self.recurse_into_functions: return if node.impl: # Revert change made during semantic analysis pass 2. assert node.items[-1] is not node.impl node.items.append(node.impl) super().visit_overloaded_func_def(node) @contextlib.contextmanager def enter_class(self, info: TypeInfo) -> Iterator[None]: # TODO: Update and restore self.names old_type = self.type old_is_class_body = self.is_class_body self.type = info self.is_class_body = True yield self.type = old_type self.is_class_body = old_is_class_body @contextlib.contextmanager def enter_method(self, info: TypeInfo) -> Iterator[None]: # TODO: Update and restore self.names old_type = self.type old_is_class_body = self.is_class_body self.type = info self.is_class_body = False yield self.type = old_type self.is_class_body = old_is_class_body def visit_assignment_stmt(self, node: AssignmentStmt) -> None: node.type = node.unanalyzed_type if self.type and not self.is_class_body: # TODO: Handle multiple assignment if len(node.lvalues) == 1: lvalue = node.lvalues[0] if isinstance(lvalue, MemberExpr) and lvalue.is_new_def: # Remove defined attribute from the class symbol table. If is_new_def is # true for a MemberExpr, we know that it must be an assignment through # self, since only those can define new attributes. del self.type.names[lvalue.name] super().visit_assignment_stmt(node) def visit_import_from(self, node: ImportFrom) -> None: if node.assignments: node.assignments = [] else: if self.names: # Reset entries in the symbol table. This is necessary since # otherwise the semantic analyzer will think that the import # assigns to an existing name instead of defining a new one. for name, as_name in node.names: imported_name = as_name or name symnode = self.names[imported_name] symnode.kind = UNBOUND_IMPORTED symnode.node = None def visit_import(self, node: Import) -> None: if node.assignments: node.assignments = [] else: if self.names: # Reset entries in the symbol table. This is necessary since # otherwise the semantic analyzer will think that the import # assigns to an existing name instead of defining a new one. 
for name, as_name in node.ids: imported_name = as_name or name initial = imported_name.split('.')[0] symnode = self.names[initial] symnode.kind = UNBOUND_IMPORTED symnode.node = None def visit_name_expr(self, node: NameExpr) -> None: # Global assignments are processed in semantic analysis pass 1, and we # only want to strip changes made in passes 2 or later. if not (node.kind == GDEF and node.is_new_def): # Remove defined attributes so that they can recreated during semantic analysis. if node.kind == MDEF and node.is_new_def: self.strip_class_attr(node.name) self.strip_ref_expr(node) def visit_member_expr(self, node: MemberExpr) -> None: self.strip_ref_expr(node) # These need to cleared for member expressions but not for other RefExprs since # these can change based on changed in a base class. node.is_new_def = False node.is_inferred_def = False if self.is_duplicate_attribute_def(node): # This is marked as an instance variable definition but a base class # defines an attribute with the same name, and we can't have # multiple definitions for an attribute. Defer to the base class # definition. self.strip_class_attr(node.name) node.def_var = None super().visit_member_expr(node) def visit_index_expr(self, node: IndexExpr) -> None: node.analyzed = None # was a type alias super().visit_index_expr(node) def strip_class_attr(self, name: str) -> None: if self.type is not None: del self.type.names[name] def is_duplicate_attribute_def(self, node: MemberExpr) -> bool: if not node.is_inferred_def: return False assert self.type is not None, "Internal error: Member defined outside class" if node.name not in self.type.names: return False return any(info.get(node.name) is not None for info in self.type.mro[1:]) def strip_ref_expr(self, node: RefExpr) -> None: node.kind = None node.node = None node.fullname = None node.is_new_def = False node.is_inferred_def = False def visit_call_expr(self, node: CallExpr) -> None: node.analyzed = None super().visit_call_expr(node) def visit_super_expr(self, node: SuperExpr) -> None: node.info = None super().visit_super_expr(node) # TODO: handle more node types def is_self_member_ref(memberexpr: MemberExpr) -> bool: """Does memberexpr refer to an attribute of self?""" # TODO: Merge with is_self_member_ref in semanal.py. if not isinstance(memberexpr.expr, NameExpr): return False node = memberexpr.expr.node return isinstance(node, Var) and node.is_self @contextlib.contextmanager def nothing() -> Iterator[None]: yield
2.1875
2
bsp/nrf5x/tools/sdk_dist.py
BreederBai/rt-thread
7,482
136
import os
import sys
import shutil

cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(cwd_path), 'rt-thread', 'tools'))


# BSP dist function
def dist_do_building(BSP_ROOT, dist_dir):
    from mkdist import bsp_copy_files
    import rtconfig

    library_dir = os.path.join(dist_dir, 'libraries')

    print("=> copy nrf52 bsp libraries")
    library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
    bsp_copy_files(library_path, library_dir)
2.140625
2
lib/spack/spack/multimethod.py
kkauder/spack
2
137
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) """This module contains utilities for using multi-methods in spack. You can think of multi-methods like overloaded methods -- they're methods with the same name, and we need to select a version of the method based on some criteria. e.g., for overloaded methods, you would select a version of the method to call based on the types of its arguments. In spack, multi-methods are used to ease the life of package authors. They allow methods like install() (or other methods called by install()) to declare multiple versions to be called when the package is instantiated with different specs. e.g., if the package is built with OpenMPI on x86_64,, you might want to call a different install method than if it was built for mpich2 on BlueGene/Q. Likewise, you might want to do a different type of install for different versions of the package. Multi-methods provide a simple decorator-based syntax for this that avoids overly complicated rat nests of if statements. Obviously, depending on the scenario, regular old conditionals might be clearer, so package authors should use their judgement. """ import functools import inspect from llnl.util.lang import caller_locals import spack.architecture import spack.error from spack.spec import Spec class MultiMethodMeta(type): """This allows us to track the class's dict during instantiation.""" #: saved dictionary of attrs on the class being constructed _locals = None @classmethod def __prepare__(cls, name, bases, **kwargs): """Save the dictionary that will be used for the class namespace.""" MultiMethodMeta._locals = dict() return MultiMethodMeta._locals def __init__(cls, name, bases, attr_dict): """Clear out the cached locals dict once the class is built.""" MultiMethodMeta._locals = None super(MultiMethodMeta, cls).__init__(name, bases, attr_dict) class SpecMultiMethod(object): """This implements a multi-method for Spack specs. Packages are instantiated with a particular spec, and you may want to execute different versions of methods based on what the spec looks like. For example, you might want to call a different version of install() for one platform than you call on another. The SpecMultiMethod class implements a callable object that handles method dispatch. When it is called, it looks through registered methods and their associated specs, and it tries to find one that matches the package's spec. If it finds one (and only one), it will call that method. This is intended for use with decorators (see below). The decorator (see docs below) creates SpecMultiMethods and registers method versions with them. To register a method, you can do something like this: mm = SpecMultiMethod() mm.register("^chaos_5_x86_64_ib", some_method) The object registered needs to be a Spec or some string that will parse to be a valid spec. When the mm is actually called, it selects a version of the method to call based on the sys_type of the object it is called on. See the docs for decorators below for more details. 
""" def __init__(self, default=None): self.method_list = [] self.default = default if default: functools.update_wrapper(self, default) def register(self, spec, method): """Register a version of a method for a particular spec.""" self.method_list.append((spec, method)) if not hasattr(self, '__name__'): functools.update_wrapper(self, method) else: assert(self.__name__ == method.__name__) def __get__(self, obj, objtype): """This makes __call__ support instance methods.""" # Method_list is a list of tuples (constraint, method) # Here we are going to assume that we have at least one # element in the list. The first registered function # will be the one 'wrapped'. wrapped_method = self.method_list[0][1] # Call functools.wraps manually to get all the attributes # we need to be disguised as the wrapped_method func = functools.wraps(wrapped_method)( functools.partial(self.__call__, obj) ) return func def _get_method_by_spec(self, spec): """Find the method of this SpecMultiMethod object that satisfies the given spec, if one exists """ for condition, method in self.method_list: if spec.satisfies(condition): return method return self.default or None def __call__(self, package_self, *args, **kwargs): """Find the first method with a spec that matches the package's spec. If none is found, call the default or if there is none, then raise a NoSuchMethodError. """ spec_method = self._get_method_by_spec(package_self.spec) if spec_method: return spec_method(package_self, *args, **kwargs) # Unwrap the MRO of `package_self by hand. Note that we can't # use `super()` here, because using `super()` recursively # requires us to know the class of `package_self`, as well as # its superclasses for successive calls. We don't have that # information within `SpecMultiMethod`, because it is not # associated with the package class. for cls in inspect.getmro(package_self.__class__)[1:]: superself = cls.__dict__.get(self.__name__, None) if isinstance(superself, SpecMultiMethod): # Check parent multimethod for method for spec. superself_method = superself._get_method_by_spec( package_self.spec ) if superself_method: return superself_method(package_self, *args, **kwargs) elif superself: return superself(package_self, *args, **kwargs) raise NoSuchMethodError( type(package_self), self.__name__, package_self.spec, [m[0] for m in self.method_list] ) class when(object): """This annotation lets packages declare multiple versions of methods like install() that depend on the package's spec. For example: .. code-block:: python class SomePackage(Package): ... def install(self, prefix): # Do default install @when('target=x86_64:') def install(self, prefix): # This will be executed instead of the default install if # the package's target is in the x86_64 family. @when('target=ppc64:') def install(self, prefix): # This will be executed if the package's target is in # the ppc64 family This allows each package to have a default version of install() AND specialized versions for particular platforms. The version that is called depends on the architecutre of the instantiated package. Note that this works for methods other than install, as well. So, if you only have part of the install that is platform specific, you could do this: .. code-block:: python class SomePackage(Package): ... # virtual dependence on MPI. # could resolve to mpich, mpich2, OpenMPI depends_on('mpi') def setup(self): # do nothing in the default case pass @when('^openmpi') def setup(self): # do something special when this is built with OpenMPI for # its MPI implementations. 
def install(self, prefix): # Do common install stuff self.setup() # Do more common install stuff Note that the default version of decorated methods must *always* come first. Otherwise it will override all of the platform-specific versions. There's not much we can do to get around this because of the way decorators work. """ def __init__(self, condition): if isinstance(condition, bool): self.spec = Spec() if condition else None else: self.spec = Spec(condition) def __call__(self, method): # In Python 2, Get the first definition of the method in the # calling scope by looking at the caller's locals. In Python 3, # we handle this using MultiMethodMeta.__prepare__. if MultiMethodMeta._locals is None: MultiMethodMeta._locals = caller_locals() # Create a multimethod with this name if there is not one already original_method = MultiMethodMeta._locals.get(method.__name__) if not type(original_method) == SpecMultiMethod: original_method = SpecMultiMethod(original_method) if self.spec is not None: original_method.register(self.spec, method) return original_method class MultiMethodError(spack.error.SpackError): """Superclass for multimethod dispatch errors""" def __init__(self, message): super(MultiMethodError, self).__init__(message) class NoSuchMethodError(spack.error.SpackError): """Raised when we can't find a version of a multi-method.""" def __init__(self, cls, method_name, spec, possible_specs): super(NoSuchMethodError, self).__init__( "Package %s does not support %s called with %s. Options are: %s" % (cls.__name__, method_name, spec, ", ".join(str(s) for s in possible_specs)))
2.453125
2
third_party/protobuf/protobuf.gyp
meego-tablet-ux/meego-app-browser
1
138
# Copyright (c) 2009 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'conditions': [ ['OS!="win"', { 'variables': { 'config_h_dir': '.', # crafted for gcc/linux. }, }, { # else, OS=="win" 'variables': { 'config_h_dir': 'vsprojects', # crafted for msvc. }, 'target_defaults': { 'msvs_disabled_warnings': [ 4018, # signed/unsigned mismatch in comparison 4244, # implicit conversion, possible loss of data 4355, # 'this' used in base member initializer list ], 'defines!': [ 'WIN32_LEAN_AND_MEAN', # Protobuf defines this itself. ], }, }] ], 'targets': [ # The "lite" lib is about 1/7th the size of the heavy lib, # but it doesn't support some of the more exotic features of # protobufs, like reflection. To generate C++ code that can link # against the lite version of the library, add the option line: # # option optimize_for = LITE_RUNTIME; # # to your .proto file. { 'target_name': 'protobuf_lite', 'type': '<(library)', 'toolsets': ['host', 'target'], 'sources': [ 'src/google/protobuf/stubs/common.h', 'src/google/protobuf/stubs/once.h', 'src/google/protobuf/extension_set.h', 'src/google/protobuf/generated_message_util.h', 'src/google/protobuf/message_lite.h', 'src/google/protobuf/repeated_field.h', 'src/google/protobuf/unknown_field_set.cc', 'src/google/protobuf/unknown_field_set.h', 'src/google/protobuf/wire_format_lite.h', 'src/google/protobuf/wire_format_lite_inl.h', 'src/google/protobuf/io/coded_stream.h', 'src/google/protobuf/io/zero_copy_stream.h', 'src/google/protobuf/io/zero_copy_stream_impl_lite.h', 'src/google/protobuf/stubs/common.cc', 'src/google/protobuf/stubs/once.cc', 'src/google/protobuf/stubs/hash.h', 'src/google/protobuf/stubs/map-util.h', 'src/google/protobuf/stubs/stl_util-inl.h', 'src/google/protobuf/extension_set.cc', 'src/google/protobuf/generated_message_util.cc', 'src/google/protobuf/message_lite.cc', 'src/google/protobuf/repeated_field.cc', 'src/google/protobuf/wire_format_lite.cc', 'src/google/protobuf/io/coded_stream.cc', 'src/google/protobuf/io/coded_stream_inl.h', 'src/google/protobuf/io/zero_copy_stream.cc', 'src/google/protobuf/io/zero_copy_stream_impl_lite.cc', '<(config_h_dir)/config.h', ], 'include_dirs': [ '<(config_h_dir)', 'src', ], # This macro must be defined to suppress the use of dynamic_cast<>, # which requires RTTI. 'defines': [ 'GOOGLE_PROTOBUF_NO_RTTI', ], 'direct_dependent_settings': { 'include_dirs': [ '<(config_h_dir)', 'src', ], 'defines': [ 'GOOGLE_PROTOBUF_NO_RTTI', ], }, }, # This is the full, heavy protobuf lib that's needed for c++ .proto's # that don't specify the LITE_RUNTIME option. The protocol # compiler itself (protoc) falls into that category. 
# # DO NOT LINK AGAINST THIS TARGET IN CHROME CODE --agl { 'target_name': 'protobuf_full_do_not_use', 'type': '<(library)', 'toolsets': ['host','target'], 'sources': [ 'src/google/protobuf/descriptor.h', 'src/google/protobuf/descriptor.pb.h', 'src/google/protobuf/descriptor_database.h', 'src/google/protobuf/dynamic_message.h', 'src/google/protobuf/generated_message_reflection.h', 'src/google/protobuf/message.h', 'src/google/protobuf/reflection_ops.h', 'src/google/protobuf/service.h', 'src/google/protobuf/text_format.h', 'src/google/protobuf/unknown_field_set.h', 'src/google/protobuf/wire_format.h', 'src/google/protobuf/io/gzip_stream.h', 'src/google/protobuf/io/printer.h', 'src/google/protobuf/io/tokenizer.h', 'src/google/protobuf/io/zero_copy_stream_impl.h', 'src/google/protobuf/compiler/code_generator.h', 'src/google/protobuf/compiler/command_line_interface.h', 'src/google/protobuf/compiler/importer.h', 'src/google/protobuf/compiler/parser.h', 'src/google/protobuf/stubs/strutil.cc', 'src/google/protobuf/stubs/strutil.h', 'src/google/protobuf/stubs/substitute.cc', 'src/google/protobuf/stubs/substitute.h', 'src/google/protobuf/stubs/structurally_valid.cc', 'src/google/protobuf/descriptor.cc', 'src/google/protobuf/descriptor.pb.cc', 'src/google/protobuf/descriptor_database.cc', 'src/google/protobuf/dynamic_message.cc', 'src/google/protobuf/extension_set_heavy.cc', 'src/google/protobuf/generated_message_reflection.cc', 'src/google/protobuf/message.cc', 'src/google/protobuf/reflection_ops.cc', 'src/google/protobuf/service.cc', 'src/google/protobuf/text_format.cc', 'src/google/protobuf/unknown_field_set.cc', 'src/google/protobuf/wire_format.cc', # This file pulls in zlib, but it's not actually used by protoc, so # instead of compiling zlib for the host, let's just exclude this. 
# 'src/src/google/protobuf/io/gzip_stream.cc', 'src/google/protobuf/io/printer.cc', 'src/google/protobuf/io/tokenizer.cc', 'src/google/protobuf/io/zero_copy_stream_impl.cc', 'src/google/protobuf/compiler/importer.cc', 'src/google/protobuf/compiler/parser.cc', ], 'dependencies': [ 'protobuf_lite', ], 'export_dependent_settings': [ 'protobuf_lite', ], }, { 'target_name': 'protoc', 'type': 'executable', 'toolsets': ['host'], 'sources': [ 'src/google/protobuf/compiler/code_generator.cc', 'src/google/protobuf/compiler/command_line_interface.cc', 'src/google/protobuf/compiler/plugin.cc', 'src/google/protobuf/compiler/plugin.pb.cc', 'src/google/protobuf/compiler/subprocess.cc', 'src/google/protobuf/compiler/subprocess.h', 'src/google/protobuf/compiler/zip_writer.cc', 'src/google/protobuf/compiler/zip_writer.h', 'src/google/protobuf/compiler/cpp/cpp_enum.cc', 'src/google/protobuf/compiler/cpp/cpp_enum.h', 'src/google/protobuf/compiler/cpp/cpp_enum_field.cc', 'src/google/protobuf/compiler/cpp/cpp_enum_field.h', 'src/google/protobuf/compiler/cpp/cpp_extension.cc', 'src/google/protobuf/compiler/cpp/cpp_extension.h', 'src/google/protobuf/compiler/cpp/cpp_field.cc', 'src/google/protobuf/compiler/cpp/cpp_field.h', 'src/google/protobuf/compiler/cpp/cpp_file.cc', 'src/google/protobuf/compiler/cpp/cpp_file.h', 'src/google/protobuf/compiler/cpp/cpp_generator.cc', 'src/google/protobuf/compiler/cpp/cpp_helpers.cc', 'src/google/protobuf/compiler/cpp/cpp_helpers.h', 'src/google/protobuf/compiler/cpp/cpp_message.cc', 'src/google/protobuf/compiler/cpp/cpp_message.h', 'src/google/protobuf/compiler/cpp/cpp_message_field.cc', 'src/google/protobuf/compiler/cpp/cpp_message_field.h', 'src/google/protobuf/compiler/cpp/cpp_primitive_field.cc', 'src/google/protobuf/compiler/cpp/cpp_primitive_field.h', 'src/google/protobuf/compiler/cpp/cpp_service.cc', 'src/google/protobuf/compiler/cpp/cpp_service.h', 'src/google/protobuf/compiler/cpp/cpp_string_field.cc', 'src/google/protobuf/compiler/cpp/cpp_string_field.h', 'src/google/protobuf/compiler/java/java_enum.cc', 'src/google/protobuf/compiler/java/java_enum.h', 'src/google/protobuf/compiler/java/java_enum_field.cc', 'src/google/protobuf/compiler/java/java_enum_field.h', 'src/google/protobuf/compiler/java/java_extension.cc', 'src/google/protobuf/compiler/java/java_extension.h', 'src/google/protobuf/compiler/java/java_field.cc', 'src/google/protobuf/compiler/java/java_field.h', 'src/google/protobuf/compiler/java/java_file.cc', 'src/google/protobuf/compiler/java/java_file.h', 'src/google/protobuf/compiler/java/java_generator.cc', 'src/google/protobuf/compiler/java/java_helpers.cc', 'src/google/protobuf/compiler/java/java_helpers.h', 'src/google/protobuf/compiler/java/java_message.cc', 'src/google/protobuf/compiler/java/java_message.h', 'src/google/protobuf/compiler/java/java_message_field.cc', 'src/google/protobuf/compiler/java/java_message_field.h', 'src/google/protobuf/compiler/java/java_primitive_field.cc', 'src/google/protobuf/compiler/java/java_primitive_field.h', 'src/google/protobuf/compiler/java/java_service.cc', 'src/google/protobuf/compiler/java/java_service.h', 'src/google/protobuf/compiler/java/java_string_field.cc', 'src/google/protobuf/compiler/java/java_string_field.h', 'src/google/protobuf/compiler/python/python_generator.cc', 'src/google/protobuf/compiler/main.cc', ], 'dependencies': [ 'protobuf_full_do_not_use', ], 'include_dirs': [ '<(config_h_dir)', 'src/src', ], }, { # Generate the python module needed by all protoc-generated Python code. 
'target_name': 'py_proto', 'type': 'none', 'copies': [ { 'destination': '<(PRODUCT_DIR)/pyproto/google/', 'files': [ # google/ module gets an empty __init__.py. '__init__.py', ], }, { 'destination': '<(PRODUCT_DIR)/pyproto/google/protobuf', 'files': [ 'python/google/protobuf/__init__.py', 'python/google/protobuf/descriptor.py', 'python/google/protobuf/message.py', 'python/google/protobuf/reflection.py', 'python/google/protobuf/service.py', 'python/google/protobuf/service_reflection.py', 'python/google/protobuf/text_format.py', # TODO(ncarter): protoc's python generator treats descriptor.proto # specially, but it's not possible to trigger the special treatment # unless you run protoc from ./src/src (the treatment is based # on the path to the .proto file matching a constant exactly). # I'm not sure how to convince gyp to execute a rule from a # different directory. Until this is resolved, use a copy of # descriptor_pb2.py that I manually generated. 'descriptor_pb2.py', ], }, { 'destination': '<(PRODUCT_DIR)/pyproto/google/protobuf/internal', 'files': [ 'python/google/protobuf/internal/__init__.py', 'python/google/protobuf/internal/api_implementation.py', 'python/google/protobuf/internal/containers.py', 'python/google/protobuf/internal/cpp_message.py', 'python/google/protobuf/internal/decoder.py', 'python/google/protobuf/internal/encoder.py', 'python/google/protobuf/internal/generator_test.py', 'python/google/protobuf/internal/message_listener.py', 'python/google/protobuf/internal/python_message.py', 'python/google/protobuf/internal/type_checkers.py', 'python/google/protobuf/internal/wire_format.py', ], }, ], # # We can't generate a proper descriptor_pb2.py -- see earlier comment. # 'rules': [ # { # 'rule_name': 'genproto', # 'extension': 'proto', # 'inputs': [ # '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)', # ], # 'variables': { # # The protoc compiler requires a proto_path argument with the # # directory containing the .proto file. # 'rule_input_relpath': 'src/google/protobuf', # }, # 'outputs': [ # '<(PRODUCT_DIR)/pyproto/google/protobuf/<(RULE_INPUT_ROOT)_pb2.py', # ], # 'action': [ # '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)', # '-I./src', # '-I.', # '--python_out=<(PRODUCT_DIR)/pyproto/google/protobuf', # 'google/protobuf/descriptor.proto', # ], # 'message': 'Generating Python code from <(RULE_INPUT_PATH)', # }, # ], # 'dependencies': [ # 'protoc#host', # ], # 'sources': [ # 'src/google/protobuf/descriptor.proto', # ], }, ], } # Local Variables: # tab-width:2 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=2 shiftwidth=2:
1.296875
1
main.py
Yash-s-Code-Camp/Python-Day-4
0
139
<gh_stars>0
# def mul(a):
#     return lambda b: b * a

# singler = mul(1)  # addition = lambda b: b*1
# doubler = mul(2)  # addition = lambda b: b*2
# tripler = mul(3)  # addition = lambda b: b*3

# print(doubler(7))  # 7*2 = 14
# print(tripler(7))  # 7*3 = 21
# print(singler(7))  # 7*1 = 7


class Student:
    def __init__(self, fname):
        self.fname = fname

    def greet(self, fname):
        return f"Hello, {fname}"


class BatchA(Student):
    def __init__(self, lname):
        self.lname = lname
        # Student.__init__(self, "Nikunj")
        super().__init__("Nikunj")

    def printName(self):
        return f"{self.fname} {self.lname}"


stud = BatchA("Thakor")
print(stud.printName())

# Colour reference notes; `rgb` is not defined in this script, so these calls are
# commented out to keep the file runnable.
# rgb(255, 255, 255)  # White
# rgb(255, 0, 0)      # Red
# rgb(0, 0, 0)        # Black
# rgb(0, 255, 255)    # Cyan
# rgb(255, 255, 0)    # Yellow
# #00ff00  //green
# #1e90ff  //dodgerblue
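# Worked sketch of the commented-out closure factory above: mul(a) returns a
# function that multiplies its argument by a, so each returned lambda "remembers"
# its own a.
def mul(a):
    return lambda b: b * a

doubler = mul(2)
tripler = mul(3)
assert doubler(7) == 14
assert tripler(7) == 21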
3.609375
4
paperstream/create_diary.py
MarcoRosso/paperstream
0
140
""" Create diaries in A5 and A4 sizes based on PDF templates. <NAME> """ import datetime import math import sys from io import BytesIO from pathlib import Path from PyPDF2 import PdfFileReader, PdfFileWriter from reportlab.lib.pagesizes import A5, A4 from reportlab.lib.utils import ImageReader from reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts import TTFError, TTFont from reportlab.pdfgen import canvas def resource_path(relative_path): """ Get absolute path to resource, works for dev and for PyInstaller """ base_path = getattr(sys, '_MEIPASS', Path(__file__).resolve().parent) return base_path / Path(relative_path) CORNER_DIR = resource_path("input/1_diaries_to_create/resources") LOGO_PATH = resource_path(CORNER_DIR / Path("logo.png")) DEFAULT_FONT = resource_path(CORNER_DIR / Path('FreeSansLocal.ttf')) CREATED_DIARIES_DIR = resource_path("output/created_diaries/") ############################################################# ############################################################# ############################################################# ##### Algorithm to convert A4 pages into an A5 booklet ###### ############################################################# ############################################################# ############################################################# ## Adapted from the work by <NAME>, https://bitbucket.org/spookylukey/booklet-maker/src class Sheet(object): '''A4 Sheets''' def __init__(self): self.front = PrintPage() self.back = PrintPage() class PrintPage(object): '''A4 page with containers for A4 pages''' def __init__(self): self.left = PageContainer() self.right = PageContainer() class PageContainer(object): '''A5 containers''' def __init__(self): self.page = None def build_booklet(pages): ''' Build booklet ''' # Double sized page, with double-sided printing, fits 4 of the original. sheet_count = int(math.ceil(len(pages) / 4.0)) booklet = [Sheet() for i in range(0, sheet_count)] # Assign input pages to sheets # This is the core algo. To understand it: # * pick up 3 A4 sheets, landscape # * number the sheets from 1 to 3, starting with bottom one # * fold the stack in the middle to form an A5 booklet # * work out what order you need to use the front left, # front right, back left and back right sides. 
def containers(): '''Yields parts of the booklet in the order they should be used.''' for sheet in booklet: yield sheet.back.right yield sheet.front.left for sheet in reversed(booklet): yield sheet.front.right yield sheet.back.left for container, page in zip(containers(), pages): container.page = page return booklet def add_double_page(writer, page_size, print_page): ''' Adds a double page ''' width, height = page_size page = writer.insertBlankPage(width=width, height=height, index=writer.getNumPages()) # Merge the left page l_page = print_page.left.page if l_page is not None: page.mergePage(l_page) # Merge the right page with translation r_page = print_page.right.page if r_page is not None: page.mergeTranslatedPage(r_page, width / 2, 0) def convert_to_a5_booklet(input_file, blanks=0): '''Converts a PDF into a double sided A5 file to print as an A4 (two A5 pages per A4 page)''' # Create internal dir to save the a5 files a5_booklets_dir = CREATED_DIARIES_DIR Path.mkdir(a5_booklets_dir, parents=True, exist_ok=True) # Create the a5 booklet's name a5_booklet_name = Path(input_file).stem + "_as_a5_booklet" a5_booklet = a5_booklets_dir / Path("{}.pdf".format(a5_booklet_name)) reader = PdfFileReader(open(input_file, "rb")) pages = [reader.getPage(p) for p in range(0, reader.getNumPages())] for index in range(0, blanks): pages.insert(0, None) sheets = build_booklet(pages) writer = PdfFileWriter() firs_page = reader.getPage(0) input_width = firs_page.mediaBox.getWidth() output_width = input_width * 2 input_height = firs_page.mediaBox.getHeight() output_height = input_height page_size = (output_width, output_height) # We want to group fronts and backs together. for sheet in sheets: add_double_page(writer, page_size, sheet.back) add_double_page(writer, page_size, sheet.front) with open(a5_booklet, "wb") as a5_booklet_stream: writer.write(a5_booklet_stream) return a5_booklet ############################################################# ############################################################# ############################################################# ########## Create A4 paper diary ############ ############################################################# ############################################################# ############################################################# def create_diary_cover(participant_id, email, font): '''Create cover of the A5 diary''' packet = BytesIO() cover_canvas = canvas.Canvas(packet, pagesize=A4) width, height = A4 # Centering the logo or participant ID if Path.exists(LOGO_PATH): logo = ImageReader(LOGO_PATH) cover_canvas.drawImage(logo, x=(width * (1/6.0)), y=(height/4), width=width * (4/6.0), preserveAspectRatio=True, mask='auto') else: cover_canvas.setFont(font, 50) cover_canvas.drawCentredString(width/2, height/2, participant_id) # Lost legend if not (email is None or email == ""): cover_canvas.setFont(font, 15) cover_canvas.drawCentredString(width/2, 50, "If you find this document, please email " + email) cover_canvas.save() packet.seek(0) return PdfFileReader(packet).getPage(0) def create_diary_page(pdf_template, font, top_left_text, page_number, top_right_text): packet = BytesIO() diary_canvas = canvas.Canvas(packet, pagesize=A5) # Header diary_canvas.setFont(font, 11) #diary_canvas.drawRightString(378, 562, str(top_right_text)) diary_canvas.drawString(36.5, 562, top_left_text) # Corners corners = [(CORNER_DIR / Path("corner_ul.png"), 25, 553), (CORNER_DIR / Path("corner_ur.png"), 365, 553), (CORNER_DIR / Path("corner_bl.png"), 25, 15), (CORNER_DIR / 
Path("corner_br.png"), 365, 15)] for corner_path, x, y in corners: if corner_path.exists(): corner = ImageReader(corner_path) diary_canvas.drawImage(corner, x=x, y=y, mask='auto') # Footer #diary_canvas.setFont(font, 8) #diary_canvas.drawString(36.5, 24, str(page_number)) diary_canvas.save() # Merge template and additions (header, corners and footer) packet.seek(0) page_additions = PdfFileReader(packet).getPage(0) new_page = PdfFileReader(open(pdf_template, "rb")).getPage(0) new_page.mergePage(page_additions) new_page.scaleTo(A4[0], A4[1]) return new_page def create_a4_diary(pdf_template, pages, top_left_text, email=None, font='Arial'): """Creates an A4 document with [PAGES] from [STARTING_DATE]""" starting_date = parse_date(top_left_text) font = set_active_font(font) # Create output folder/file if not Path(pdf_template).exists(): raise ValueError("Template does not exist {}".format(pdf_template)) Path.mkdir(CREATED_DIARIES_DIR, parents=True, exist_ok=True) a4_document_name = Path(pdf_template).stem a4_document_path = CREATED_DIARIES_DIR / Path("{}_document.pdf".format(a4_document_name)) pdf_file = PdfFileWriter() # Cover pdf_file.addPage(create_diary_cover(a4_document_name, email, font)) pdf_file.addBlankPage() # Pages for page in range(1, pages+1): if starting_date is not None: top_left_text = starting_date.strftime('%A, %d %b %Y') starting_date += datetime.timedelta(days=1) new_page = create_diary_page(pdf_template, font, top_left_text,page, a4_document_name) pdf_file.addPage(new_page) # Backcover pdf_file.addBlankPage() # Save a4 document with open(a4_document_path, "wb") as output_stream: pdf_file.write(output_stream) return a4_document_path def set_active_font(font): """Register the font to use in header and footer of the diary""" try: pdfmetrics.registerFont(TTFont(font, font + '.ttf')) except TTFError: font = 'FreeSansLocal' pdfmetrics.registerFont(TTFont(font, DEFAULT_FONT)) return font def parse_date(s): try: return datetime.datetime.strptime(s, "%d/%m/%Y") except ValueError: return None
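# Standalone sketch (assumed helper name, not part of the original module) of the
# page ordering that build_booklet() produces: sheets are filled back-right and
# front-left from the start of the document, then front-right and back-left from
# the end, which is why folding the printed A4 stack yields consecutive A5 pages.
def imposition_order(num_pages):
    """Return ((back_left, back_right), (front_left, front_right)) page indices per A4 sheet."""
    sheet_count = -(-num_pages // 4)  # ceiling division: four A5 pages per A4 sheet
    # slots[s] = [back_left, back_right, front_left, front_right]
    slots = [[None] * 4 for _ in range(sheet_count)]
    order = []
    for s in range(sheet_count):            # walk sheets bottom-up: back-right, front-left
        order += [(s, 1), (s, 2)]
    for s in reversed(range(sheet_count)):  # then top-down: front-right, back-left
        order += [(s, 3), (s, 0)]
    for page, (s, slot) in zip(range(num_pages), order):
        slots[s][slot] = page
    return [((bl, br), (fl, fr)) for bl, br, fl, fr in slots]

# imposition_order(8) -> [((7, 0), (1, 6)), ((5, 2), (3, 4))]: sheet 0 back carries
# pages 7|0, its front 1|6, and so on, matching the back-then-front write order
# used in convert_to_a5_booklet().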
2.46875
2
wextractor/extractors/csv_extractor.py
codeforamerica/w-drive-extractor
3
141
<filename>wextractor/extractors/csv_extractor.py #!/usr/bin/env python import urllib2 import httplib from urlparse import urlparse import csv from wextractor.extractors.extractor import Extractor class CsvExtractor(Extractor): def __init__(self, target, header=None, dtypes=None, url=None): ''' CsvExtractor initializes with an optional url flag that tells the extractor whether or not the resource is local or remote so that it can be loaded accordingly ''' super(CsvExtractor, self).__init__(target, header, dtypes) if url is None: self.url = self.detect_url(target) elif type(url) != bool: raise TypeError('url kwarg must be of type bool') else: self.url = url def detect_url(self, target): # see: http://stackoverflow.com/questions/2924422/how-do-i-determine-if-a-web-page-exists-with-shell-scripting # and http://stackoverflow.com/questions/1140661/python-get-http-response-code-from-a-url # for additional information good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY] # check to see if we have a scheme in the url, and append one if not parsed_target = urlparse(target) if bool(parsed_target.scheme) is False: target = 'http://' + target host, path = urlparse(target)[1:3] try: conn = httplib.HTTPConnection(host) conn.request("HEAD", path) status = conn.getresponse().status except StandardError: status = None return status in good_codes def extract(self): if self.url: raw_data = urllib2.urlopen(self.target).read().decode('utf-8-sig').rstrip() else: with open(self.target, 'r') as f: raw_data = f.read().decode('utf-8-sig').rstrip() # standardize the file endings raw_data = raw_data.replace('\r\n', '\n').replace('\r', '\n') if self.header is None: # use first line if self.header not defined current_headers = raw_data.split('\n')[0].split(',') raw_data = '\n'.join(raw_data.split('\n')[1:]) else: current_headers = self.header output = [] reader = csv.reader(raw_data.splitlines(), delimiter=',') for row in reader: output.append( self.transform_row(current_headers, row) ) return output
3.109375
3
pyscf/geomopt/berny_solver.py
r-peng/pyscf
2
142
<reponame>r-peng/pyscf #!/usr/bin/env python # Copyright 2014-2019 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Interface to geometry optimizer pyberny https://github.com/jhrmnn/pyberny ''' from __future__ import absolute_import import pkg_resources try: dist = pkg_resources.get_distribution('pyberny') except pkg_resources.DistributionNotFound: dist = None if dist is None or [int(x) for x in dist.version.split('.')] < [0, 6, 2]: msg = ('Geometry optimizer Pyberny not found or outdated. Install or update ' 'with:\n\n\tpip install -U pyberny') raise ImportError(msg) import time import numpy import logging from pyscf import lib from pyscf.geomopt.addons import (as_pyscf_method, dump_mol_geometry, symmetrize) from pyscf import __config__ from pyscf.grad.rhf import GradientsBasics from berny import Berny, geomlib, coords # Overwrite pyberny's atomic unit coords.angstrom = 1./lib.param.BOHR INCLUDE_GHOST = getattr(__config__, 'geomopt_berny_solver_optimize_include_ghost', True) ASSERT_CONV = getattr(__config__, 'geomopt_berny_solver_optimize_assert_convergence', True) def to_berny_geom(mol, include_ghost=INCLUDE_GHOST): atom_charges = mol.atom_charges() if include_ghost: # Symbol Ghost is not supported in current version of pyberny #species = [mol.atom_symbol(i) if z != 0 else 'Ghost' # for i,z in enumerate(atom_charges)] species = [mol.atom_symbol(i) if z != 0 else 'H' for i,z in enumerate(atom_charges)] coords = mol.atom_coords() * lib.param.BOHR else: atmlst = numpy.where(atom_charges != 0)[0] # Exclude ghost atoms species = [mol.atom_symbol(i) for i in atmlst] coords = mol.atom_coords()[atmlst] * lib.param.BOHR # geomlib.Geometry is available in the new version of pyberny solver. (issue #212) if getattr(geomlib, 'Geometry', None): return geomlib.Geometry(species, coords) else: return geomlib.Molecule(species, coords) def _geom_to_atom(mol, geom, include_ghost): coords = geom.coords if include_ghost: atom_coords = coords / lib.param.BOHR else: atmlst = numpy.where(mol.atom_charges() != 0)[0] atom_coords = mol.atom_coords() atom_coords[atmlst] = coords / lib.param.BOHR return atom_coords def to_berny_log(pyscf_log): '''Adapter to allow pyberny to use pyscf.logger ''' class PyscfHandler(logging.Handler): def emit(self, record): pyscf_log.info(record.getMessage()) log = logging.getLogger('{}.{}'.format(__name__, id(pyscf_log))) log.addHandler(PyscfHandler()) log.setLevel('INFO') return log def kernel(method, assert_convergence=ASSERT_CONV, include_ghost=INCLUDE_GHOST, callback=None, **kwargs): '''Optimize geometry with pyberny for the given method. To adjust the convergence threshold, parameters can be set in kwargs as below: .. 
code-block:: python conv_params = { # They are default settings 'gradientmax': 0.45e-3, # Eh/[Bohr|rad] 'gradientrms': 0.15e-3, # Eh/[Bohr|rad] 'stepmax': 1.8e-3, # [Bohr|rad] 'steprms': 1.2e-3, # [Bohr|rad] } from pyscf.geomopt import berny_solver opt = berny_solver.GeometryOptimizer(method) opt.params = conv_params opt.kernel() ''' t0 = time.clock(), time.time() mol = method.mol.copy() if 'log' in kwargs: log = lib.logger.new_logger(method, kwargs['log']) elif 'verbose' in kwargs: log = lib.logger.new_logger(method, kwargs['verbose']) else: log = lib.logger.new_logger(method) if isinstance(method, lib.GradScanner): g_scanner = method elif isinstance(method, GradientsBasics): g_scanner = method.as_scanner() elif getattr(method, 'nuc_grad_method', None): g_scanner = method.nuc_grad_method().as_scanner() else: raise NotImplementedError('Nuclear gradients of %s not available' % method) if not include_ghost: g_scanner.atmlst = numpy.where(method.mol.atom_charges() != 0)[0] # When symmetry is enabled, the molecule may be shifted or rotated to make # the z-axis be the main axis. The transformation can cause inconsistency # between the optimization steps. The transformation is muted by setting # an explict point group to the keyword mol.symmetry (see symmetry # detection code in Mole.build function). if mol.symmetry: mol.symmetry = mol.topgroup # temporary interface, taken from berny.py optimize function berny_log = to_berny_log(log) geom = to_berny_geom(mol, include_ghost) optimizer = Berny(geom, logger=berny_log, **kwargs) t1 = t0 e_last = 0 for cycle, geom in enumerate(optimizer): if log.verbose >= lib.logger.NOTE: log.note('\nGeometry optimization cycle %d', cycle+1) dump_mol_geometry(mol, geom.coords, log) if mol.symmetry: geom.coords = symmetrize(mol, geom.coords) mol.set_geom_(_geom_to_atom(mol, geom, include_ghost), unit='Bohr') energy, gradients = g_scanner(mol) log.note('cycle %d: E = %.12g dE = %g norm(grad) = %g', cycle+1, energy, energy - e_last, numpy.linalg.norm(gradients)) e_last = energy if callable(callback): callback(locals()) if assert_convergence and not g_scanner.converged: raise RuntimeError('Nuclear gradients of %s not converged' % method) optimizer.send((energy, gradients)) t1 = log.timer('geomoetry optimization cycle %d'%cycle, *t1) t0 = log.timer('geomoetry optimization', *t0) return optimizer._converged, mol def optimize(method, assert_convergence=ASSERT_CONV, include_ghost=INCLUDE_GHOST, callback=None, **kwargs): '''Optimize geometry with pyberny for the given method. To adjust the convergence threshold, parameters can be set in kwargs as below: .. code-block:: python conv_params = { # They are default settings 'gradientmax': 0.45e-3, # Eh/[Bohr|rad] 'gradientrms': 0.15e-3, # Eh/[Bohr|rad] 'stepmax': 1.8e-3, # [Bohr|rad] 'steprms': 1.2e-3, # [Bohr|rad] } from pyscf.geomopt import berny_solver newmol = berny_solver.optimize(method, **conv_params) ''' return kernel(method, assert_convergence, include_ghost, callback, **kwargs)[1] class GeometryOptimizer(lib.StreamObject): '''Optimize the molecular geometry for the input method. Note the method.mol will be changed after calling .kernel() method. 
''' def __init__(self, method): self.method = method self.callback = None self.params = {} self.converged = False self.max_cycle = 100 @property def mol(self): return self.method.mol @mol.setter def mol(self, x): self.method.mol = x def kernel(self, params=None): if params is not None: self.params.update(params) params = dict(self.params) params['maxsteps'] = self.max_cycle self.converged, self.mol = \ kernel(self.method, callback=self.callback, **params) return self.mol optimize = kernel del(INCLUDE_GHOST, ASSERT_CONV) if __name__ == '__main__': from pyscf import gto from pyscf import scf, dft, cc, mp mol = gto.M(atom=''' C 1.1879 -0.3829 0.0000 C 0.0000 0.5526 0.0000 O -1.1867 -0.2472 0.0000 H -1.9237 0.3850 0.0000 H 2.0985 0.2306 0.0000 H 1.1184 -1.0093 0.8869 H 1.1184 -1.0093 -0.8869 H -0.0227 1.1812 0.8852 H -0.0227 1.1812 -0.8852 ''', basis='3-21g') mf = scf.RHF(mol) conv_params = { 'gradientmax': 6e-3, # Eh/Bohr 'gradientrms': 2e-3, # Eh/Bohr 'stepmax': 2e-2, # Bohr 'steprms': 1.5e-2, # Bohr } mol1 = optimize(mf, **conv_params) print(mf.kernel() - -153.219208484874) print(scf.RHF(mol1).kernel() - -153.222680852335) mf = dft.RKS(mol) mf.xc = 'pbe,' mf.conv_tol = 1e-7 mol1 = optimize(mf) mymp2 = mp.MP2(scf.RHF(mol)) mol1 = optimize(mymp2) mycc = cc.CCSD(scf.RHF(mol)) mol1 = optimize(mycc)
1.703125
2
src/main/python/taf/foundation/api/ui/aut.py
WesleyPeng/uiXautomation
6
143
# Copyright (c) 2017-2018 {Flair Inc.} <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from taf.foundation.utils import ConnectionCache class AUT(object): cache = None current = None def __init__( self, name=None, identifier=None, **kwargs ): if not AUT.cache: AUT.cache = ConnectionCache(identifier) self.id = self.cache.register( self._create_instance(name, **kwargs), identifier ) AUT.current = self @staticmethod def launch(app_location, **kwargs): raise NotImplementedError( 'Launch application' ) def activate(self): if self.id != self.cache.current_key: self.cache.current_key = self.id AUT.current = self def take_screenshot(self): self.activate() return self.get_screenshot_data() def close(self): self.cache.close(self.id) if not self.cache.current: AUT.cache = None AUT.current = None def get_screenshot_data(self): raise NotImplementedError( 'Get screenshot data from AUT' ) def _create_instance(self, name, **kwargs): raise NotImplementedError( 'Create instance of AUT' )
1.929688
2
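The AUT base class above leaves launch, get_screenshot_data and _create_instance abstract; a minimal hypothetical subclass could wire them together as below. DummyApp and DummyDriver are invented names used only for illustration.

# Hypothetical concrete subclass of the AUT base class above; DummyDriver and
# DummyApp are invented for the sketch and not part of the original package.
class DummyDriver(object):
    def screenshot(self):
        return b'fake-png-bytes'


class DummyApp(AUT):
    @staticmethod
    def launch(app_location, **kwargs):
        return DummyApp(name=app_location, **kwargs)

    def get_screenshot_data(self):
        return self.driver.screenshot()

    def _create_instance(self, name, **kwargs):
        # Called from AUT.__init__; the returned object is registered in the cache.
        self.driver = DummyDriver()
        return self.driver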
algo/vigenere.py
dkushche/Crypto
3
144
<filename>algo/vigenere.py
import crypto_tools
from itertools import cycle


def vigenere_little_doc():
    return "encrypt/decrypt using vigenere cypher"


def vigenere_full_doc():
    return """
    Advanced caesar we change dict on each char
    """


def vigenere_str_to_list(string, vigenere_dict):
    result = list()
    for char in string:
        try:
            result.append(vigenere_dict.index(char))
        except ValueError:
            err_msg = f"There is no {char} in alphabet"
            raise ValueError(err_msg)
    return result


def vigenere_processing(data, key, lang, encrypt):
    vigenere_dict = crypto_tools.get_param_json_data("alphabets.json", lang)
    num_data = vigenere_str_to_list(data, vigenere_dict)
    num_key = vigenere_str_to_list(key, vigenere_dict)
    dict_size = len(vigenere_dict)
    num_key = cycle(num_key)
    if (encrypt == "encrypt"):
        num_result = [(a + b) % dict_size for a, b in zip(num_data, num_key)]
    else:
        num_result = [
            (a + dict_size - b) % dict_size for a, b in zip(num_data, num_key)
        ]
    result_str = ""
    for val in num_result:
        result_str += vigenere_dict[val]
    return result_str


@crypto_tools.file_manipulation()
def vigenere(data):
    lang = crypto_tools.cterm('input', 'Data language: ', 'ans')
    key = crypto_tools.cterm('input', 'Enter key(str): ', 'ans')
    encrypt = crypto_tools.cterm('input', 'You want encrypt or decrypt: ', 'ans')
    if encrypt != "encrypt" and encrypt != "decrypt":
        raise ValueError("Incorrect action")
    data = crypto_tools.utf_decoder(data)
    return vigenere_processing(data, key, lang, encrypt)


vigenere.little_doc = vigenere_little_doc
vigenere.full_doc = vigenere_full_doc
3.1875
3
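The processing above is plain per-character modular addition against a per-language alphabet loaded from alphabets.json; a self-contained sketch of the same arithmetic over a hard-coded a-z alphabet (so it runs without crypto_tools) is shown below, checked against the classic LEMON example.

# Stand-alone illustration of the shift arithmetic used above, with a fixed
# a-z alphabet instead of crypto_tools.get_param_json_data("alphabets.json").
from itertools import cycle

ALPHABET = 'abcdefghijklmnopqrstuvwxyz'

def vigenere_demo(text, key, encrypt=True):
    size = len(ALPHABET)
    nums = [ALPHABET.index(c) for c in text]
    keys = cycle(ALPHABET.index(c) for c in key)
    if encrypt:
        out = [(a + b) % size for a, b in zip(nums, keys)]
    else:
        out = [(a + size - b) % size for a, b in zip(nums, keys)]
    return ''.join(ALPHABET[v] for v in out)

assert vigenere_demo('attackatdawn', 'lemon') == 'lxfopvefrnhr'
assert vigenere_demo('lxfopvefrnhr', 'lemon', encrypt=False) == 'attackatdawn'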
anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/base_context.py
space-scl/emacs.d
4
145
""" Contexts are the "values" that Python would return. However Contexts are at the same time also the "contexts" that a user is currently sitting in. A ContextSet is typically used to specify the return of a function or any other static analysis operation. In jedi there are always multiple returns and not just one. """ from functools import reduce from operator import add from parso.python.tree import ExprStmt, SyncCompFor from jedi import debug from jedi._compatibility import zip_longest, unicode from jedi.parser_utils import clean_scope_docstring from jedi.common import BaseContextSet, BaseContext from jedi.evaluate.helpers import SimpleGetItemNotFound from jedi.evaluate.utils import safe_property from jedi.evaluate.cache import evaluator_as_method_param_cache from jedi.cache import memoize_method _sentinel = object() class HelperContextMixin(object): def get_root_context(self): context = self while True: if context.parent_context is None: return context context = context.parent_context @classmethod @evaluator_as_method_param_cache() def create_cached(cls, *args, **kwargs): return cls(*args, **kwargs) def execute(self, arguments): return self.evaluator.execute(self, arguments=arguments) def execute_evaluated(self, *value_list): from jedi.evaluate.arguments import ValuesArguments arguments = ValuesArguments([ContextSet([value]) for value in value_list]) return self.evaluator.execute(self, arguments) def execute_annotation(self): return self.execute_evaluated() def gather_annotation_classes(self): return ContextSet([self]) def merge_types_of_iterate(self, contextualized_node=None, is_async=False): return ContextSet.from_sets( lazy_context.infer() for lazy_context in self.iterate(contextualized_node, is_async) ) def py__getattribute__(self, name_or_str, name_context=None, position=None, search_global=False, is_goto=False, analysis_errors=True): """ :param position: Position of the last statement -> tuple of line, column """ if name_context is None: name_context = self from jedi.evaluate import finder f = finder.NameFinder(self.evaluator, self, name_context, name_or_str, position, analysis_errors=analysis_errors) filters = f.get_filters(search_global) if is_goto: return f.filter_name(filters) return f.find(filters, attribute_lookup=not search_global) def py__await__(self): await_context_set = self.py__getattribute__(u"__await__") if not await_context_set: debug.warning('Tried to run __await__ on context %s', self) return await_context_set.execute_evaluated() def eval_node(self, node): return self.evaluator.eval_element(self, node) def create_context(self, node, node_is_context=False, node_is_object=False): return self.evaluator.create_context(self, node, node_is_context, node_is_object) def iterate(self, contextualized_node=None, is_async=False): debug.dbg('iterate %s', self) if is_async: from jedi.evaluate.lazy_context import LazyKnownContexts # TODO if no __aiter__ contexts are there, error should be: # TypeError: 'async for' requires an object with __aiter__ method, got int return iter([ LazyKnownContexts( self.py__getattribute__('__aiter__').execute_evaluated() .py__getattribute__('__anext__').execute_evaluated() .py__getattribute__('__await__').execute_evaluated() .py__stop_iteration_returns() ) # noqa ]) return self.py__iter__(contextualized_node) def is_sub_class_of(self, class_context): for cls in self.py__mro__(): if cls.is_same_class(class_context): return True return False def is_same_class(self, class2): # Class matching should prefer comparisons that are not this function. 
if type(class2).is_same_class != HelperContextMixin.is_same_class: return class2.is_same_class(self) return self == class2 class Context(HelperContextMixin, BaseContext): """ Should be defined, otherwise the API returns empty types. """ predefined_names = {} """ To be defined by subclasses. """ tree_node = None @property def api_type(self): # By default just lower name of the class. Can and should be # overwritten. return self.__class__.__name__.lower() def py__getitem__(self, index_context_set, contextualized_node): from jedi.evaluate import analysis # TODO this context is probably not right. analysis.add( contextualized_node.context, 'type-error-not-subscriptable', contextualized_node.node, message="TypeError: '%s' object is not subscriptable" % self ) return NO_CONTEXTS def py__iter__(self, contextualized_node=None): if contextualized_node is not None: from jedi.evaluate import analysis analysis.add( contextualized_node.context, 'type-error-not-iterable', contextualized_node.node, message="TypeError: '%s' object is not iterable" % self) return iter([]) def get_signatures(self): return [] def is_class(self): return False def is_instance(self): return False def is_function(self): return False def is_module(self): return False def is_namespace(self): return False def is_compiled(self): return False def is_bound_method(self): return False def py__bool__(self): """ Since Wrapper is a super class for classes, functions and modules, the return value will always be true. """ return True def py__doc__(self): try: self.tree_node.get_doc_node except AttributeError: return '' else: return clean_scope_docstring(self.tree_node) return None def get_safe_value(self, default=_sentinel): if default is _sentinel: raise ValueError("There exists no safe value for context %s" % self) return default def py__call__(self, arguments): debug.warning("no execution possible %s", self) return NO_CONTEXTS def py__stop_iteration_returns(self): debug.warning("Not possible to return the stop iterations of %s", self) return NO_CONTEXTS def get_qualified_names(self): # Returns Optional[Tuple[str, ...]] return None def is_stub(self): # The root context knows if it's a stub or not. return self.parent_context.is_stub() def iterate_contexts(contexts, contextualized_node=None, is_async=False): """ Calls `iterate`, on all contexts but ignores the ordering and just returns all contexts that the iterate functions yield. 
""" return ContextSet.from_sets( lazy_context.infer() for lazy_context in contexts.iterate(contextualized_node, is_async=is_async) ) class _ContextWrapperBase(HelperContextMixin): predefined_names = {} @safe_property def name(self): from jedi.evaluate.names import ContextName wrapped_name = self._wrapped_context.name if wrapped_name.tree_name is not None: return ContextName(self, wrapped_name.tree_name) else: from jedi.evaluate.compiled import CompiledContextName return CompiledContextName(self, wrapped_name.string_name) @classmethod @evaluator_as_method_param_cache() def create_cached(cls, evaluator, *args, **kwargs): return cls(*args, **kwargs) def __getattr__(self, name): assert name != '_wrapped_context', 'Problem with _get_wrapped_context' return getattr(self._wrapped_context, name) class LazyContextWrapper(_ContextWrapperBase): @safe_property @memoize_method def _wrapped_context(self): with debug.increase_indent_cm('Resolve lazy context wrapper'): return self._get_wrapped_context() def __repr__(self): return '<%s>' % (self.__class__.__name__) def _get_wrapped_context(self): raise NotImplementedError class ContextWrapper(_ContextWrapperBase): def __init__(self, wrapped_context): self._wrapped_context = wrapped_context def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self._wrapped_context) class TreeContext(Context): def __init__(self, evaluator, parent_context, tree_node): super(TreeContext, self).__init__(evaluator, parent_context) self.predefined_names = {} self.tree_node = tree_node def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.tree_node) class ContextualizedNode(object): def __init__(self, context, node): self.context = context self.node = node def get_root_context(self): return self.context.get_root_context() def infer(self): return self.context.eval_node(self.node) def __repr__(self): return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context) class ContextualizedName(ContextualizedNode): # TODO merge with TreeNameDefinition?! @property def name(self): return self.node def assignment_indexes(self): """ Returns an array of tuple(int, node) of the indexes that are used in tuple assignments. For example if the name is ``y`` in the following code:: x, (y, z) = 2, '' would result in ``[(1, xyz_node), (0, yz_node)]``. When searching for b in the case ``a, *b, c = [...]`` it will return:: [(slice(1, -1), abc_node)] """ indexes = [] is_star_expr = False node = self.node.parent compare = self.node while node is not None: if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'): for i, child in enumerate(node.children): if child == compare: index = int(i / 2) if is_star_expr: from_end = int((len(node.children) - i) / 2) index = slice(index, -from_end) indexes.insert(0, (index, node)) break else: raise LookupError("Couldn't find the assignment.") is_star_expr = False elif node.type == 'star_expr': is_star_expr = True elif isinstance(node, (ExprStmt, SyncCompFor)): break compare = node node = node.parent return indexes def _getitem(context, index_contexts, contextualized_node): from jedi.evaluate.context.iterable import Slice # The actual getitem call. 
simple_getitem = getattr(context, 'py__simple_getitem__', None) result = NO_CONTEXTS unused_contexts = set() for index_context in index_contexts: if simple_getitem is not None: index = index_context if isinstance(index_context, Slice): index = index.obj try: method = index.get_safe_value except AttributeError: pass else: index = method(default=None) if type(index) in (float, int, str, unicode, slice, bytes): try: result |= simple_getitem(index) continue except SimpleGetItemNotFound: pass unused_contexts.add(index_context) # The index was somehow not good enough or simply a wrong type. # Therefore we now iterate through all the contexts and just take # all results. if unused_contexts or not index_contexts: result |= context.py__getitem__( ContextSet(unused_contexts), contextualized_node ) debug.dbg('py__getitem__ result: %s', result) return result class ContextSet(BaseContextSet): def py__class__(self): return ContextSet(c.py__class__() for c in self._set) def iterate(self, contextualized_node=None, is_async=False): from jedi.evaluate.lazy_context import get_merged_lazy_context type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set] for lazy_contexts in zip_longest(*type_iters): yield get_merged_lazy_context( [l for l in lazy_contexts if l is not None] ) def execute(self, arguments): return ContextSet.from_sets(c.evaluator.execute(c, arguments) for c in self._set) def execute_evaluated(self, *args, **kwargs): return ContextSet.from_sets(c.execute_evaluated(*args, **kwargs) for c in self._set) def py__getattribute__(self, *args, **kwargs): if kwargs.get('is_goto'): return reduce(add, [c.py__getattribute__(*args, **kwargs) for c in self._set], []) return ContextSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set) def get_item(self, *args, **kwargs): return ContextSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) def try_merge(self, function_name): context_set = self.__class__([]) for c in self._set: try: method = getattr(c, function_name) except AttributeError: pass else: context_set |= method() return context_set def gather_annotation_classes(self): return ContextSet.from_sets([c.gather_annotation_classes() for c in self._set]) def get_signatures(self): return [sig for c in self._set for sig in c.get_signatures()] NO_CONTEXTS = ContextSet([]) def iterator_to_context_set(func): def wrapper(*args, **kwargs): return ContextSet(func(*args, **kwargs)) return wrapper
3
3
kiwi_scp/commands/cmd_cmd.py
yavook/kiwi-scp
0
146
<reponame>yavook/kiwi-scp from typing import Tuple import click from .cmd import KiwiCommandType, KiwiCommand from .decorators import kiwi_command from ..executable import COMPOSE_EXE from ..instance import Instance from ..project import Project @click.argument( "compose_args", metavar="[ARG]...", nargs=-1, ) @click.argument( "compose_cmd", metavar="COMMAND", ) @kiwi_command( short_help="Run docker-compose command", # ignore arguments looking like options # just pass everything down to docker-compose context_settings={"ignore_unknown_options": True}, ) class CmdCommand(KiwiCommand): """Run raw docker-compose command in a project""" type = KiwiCommandType.PROJECT enabled_only = True @classmethod def run_for_project(cls, instance: Instance, project: Project, compose_cmd: str = None, compose_args: Tuple[str] = None) -> None: COMPOSE_EXE.run([compose_cmd, *compose_args], **project.process_kwargs)
2.203125
2
homework/Testing with Examples (Network)/impl_fail-add_relation-does_not_fail_when_person1_is_non_existent.py
rvprasad/software-testing-course
11
147
<filename>homework/Testing with Examples (Network)/impl_fail-add_relation-does_not_fail_when_person1_is_non_existent.py class MyError(Exception): pass class PropertyContainer(object): def __init__(self): self.props = {} def set_property(self, prop, value): self.props[prop] = value def get_property(self, prop): return self.props.get(prop) def has_property(self, prop): return prop in self.props class Node(PropertyContainer): pass class Edge(PropertyContainer): def __init__(self, node1, node2): super().__init__() self.node1 = node1 self.node2 = node2 class Network(object): NAME_PROP = "name" # NAME_PROP is an optional string property FRIEND_PROP = "friend" # FRIEND_PROP is an optional boolean property def __init__(self): self.nodes = set() self.edges = set() def create_person(self): node = Node() self.nodes.add(node) return node # add prop to value; overwrite if prop exists def add_person_property(self, person, prop, value): # flag non-existent person if person not in self.nodes: raise RuntimeError("person does not exist") if prop == Network.NAME_PROP: # disallow non-string values for NAME_PROP property if not isinstance(value, str): raise TypeError( "{0} is a string property".format(Network.NAME_PROP)) # disallow multiple people to have the same name for p in self.nodes: if p.get_property(Network.NAME_PROP) == value and \ p is not person: raise ValueError("{0} name already taken".format(value)) person.set_property(prop, value) def add_relation(self, person1, person2): # flag non-existent persons if person1 not in self.nodes: # raise RuntimeError("person1 does not exist") person1 = self.create_person() if person2 not in self.nodes: raise RuntimeError("person2 does not exist") # flag existing edge for e in self.edges: if (e.node1 is person1 and e.node2 is person2) or \ (e.node1 is person2 and e.node2 is person1): raise ValueError("relation exists") self.edges.add(Edge(person1, person2)) def add_relation_property(self, person1, person2, prop, value): # disallow non-boolean values for FRIEND_PROP property if prop == Network.FRIEND_PROP and not isinstance(value, bool): raise TypeError( "{0} is a boolean property".format(Network.FRIEND_PROP)) for e in self.edges: if (e.node1 is person1 and e.node2 is person2) or \ (e.node1 is person2 and e.node2 is person1): e.set_property(prop, value) return # flag non-existent relation raise RuntimeError("Non-existent relation") # get a person with given name def get_person(self, name): # disallow non-string values for name if not isinstance(name, str): raise TypeError( "{0} is a string argument".format(Network.NAME_PROP)) for n in self.nodes: if n.get_property(Network.NAME_PROP) == name: return n # flag non-existent person raise RuntimeError("No person named {0}".format(name)) # get friends of friends of a person with given name def friends_of_friends(self, name): # disallow non-string values for name if not isinstance(name, str): raise TypeError( "{0} is a string argument".format(Network.NAME_PROP)) # flag non-existent person person = self.get_person(name) visited = set([person]) i = 0 while i < 2: newly_visited = set() for p in (x for x in visited): for e in (x for x in self.edges if x.get_property(Network.FRIEND_PROP) == True): n1 = e.node1 n2 = e.node2 if n1 == p: newly_visited.add(e.node2) elif n2 == p: newly_visited.add(e.node1) visited = newly_visited i += 1 return list(visited)
3.5
4
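Since the file name marks this as a deliberately faulty implementation (add_relation is supposed to reject a non-existent person1), a short probe of the seeded fault could look like the following; it only uses the classes defined above.

# Probe for the seeded fault above: a correct Network.add_relation should raise
# RuntimeError for a person1 that was never registered, but this implementation
# silently creates one instead.
net = Network()
known = net.create_person()
outsider = Node()  # constructed directly, never passed through create_person()

try:
    net.add_relation(outsider, known)
    print("fault reproduced: no error for non-existent person1")
except RuntimeError:
    print("expected behaviour: non-existent person1 rejected")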
tools/__init__.py
BranKein/Flask-template
0
148
from . import ip __all__ = ['ip']
1.140625
1
leetCode/algorithms/easy/count_and_say.py
ferhatelmas/algo
25
149
from itertools import groupby class Solution: def countAndSay(self, n): def gen(s): return "".join(str(len(list(g))) + k for k, g in groupby(s)) s, i = "1", 1 while i < n: s = gen(s) i += 1 return s
3.4375
3
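A quick sanity check of the groupby-based solution above against the first terms of the sequence (1, 11, 21, 1211, 111221):

# Usage check for the solution above; the expected strings are the standard
# opening terms of the count-and-say sequence.
s = Solution()
assert s.countAndSay(1) == "1"
assert s.countAndSay(4) == "1211"
assert s.countAndSay(5) == "111221"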
conf/feature_config.py
pupuwudi/nlp_xiaojiang
0
150
<filename>conf/feature_config.py
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time     :2019/5/10 9:13
# @author   :Mo
# @function :paths of FeatureProject


import pathlib
import sys
import os


# base dir
projectdir = str(pathlib.Path(os.path.abspath(__file__)).parent.parent)
sys.path.append(projectdir)

# path of BERT model
model_dir = projectdir + '/Data/chinese_L-12_H-768_A-12'
config_name = model_dir + '/bert_config.json'
ckpt_name = model_dir + '/bert_model.ckpt'
vocab_file = model_dir + '/vocab.txt'
# GPU memory usage fraction
gpu_memory_fraction = 0.32

# by default, take the output of the second-to-last layer as the sentence vector
layer_indexes = [-2]

# maximum sequence length
max_seq_len = 32
1.695313
2
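These paths are typically fed into a Keras-style BERT loader. The sketch below assumes the third-party keras-bert package as the consumer; the config file itself does not say which loader the project uses, so treat the imports as an assumption.

# Assumed consumer of the paths above: the keras-bert package (an assumption,
# not stated by the config file itself).
from keras_bert import load_trained_model_from_checkpoint, load_vocabulary, Tokenizer

from conf.feature_config import config_name, ckpt_name, vocab_file, max_seq_len

bert_model = load_trained_model_from_checkpoint(config_name, ckpt_name, seq_len=max_seq_len)
token_dict = load_vocabulary(vocab_file)
tokenizer = Tokenizer(token_dict)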
tests/test_prep_read.py
taruma/hidrokit
5
151
"""Test for .prep.read module """ from hidrokit.prep import read import numpy as np import pandas as pd A = pd.DataFrame( data=[ [1, 3, 4, np.nan, 2, np.nan], [np.nan, 2, 3, np.nan, 1, 4], [2, np.nan, 1, 3, 4, np.nan] ], columns=['A', 'B', 'C', 'D', 'E', 'F'] ) A_date = A.set_index(pd.date_range("20190617", "20190619")) res_A_number = {'A': [1], 'B': [2], 'C': [], 'D': [0, 1], 'E': [], 'F': [0, 2]} res_A_date = {'A': ['0618'], 'B': ['0619'], 'C': [], 'D': ['0617', '0618'], 'E': [], 'F': ['0617', '0619']} def test_read_number(): test = read.missing_row(A, date_index=False) assert test.items() == res_A_number.items() def test_read_date(): test = read.missing_row(A_date, date_format="%m%d") assert test.items() == res_A_date.items()
2.796875
3
app/blueprints/department_blueprint.py
Maxcutex/personal_ecommerce
0
152
from flasgger import swag_from from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth from app.controllers.department_controller import DepartmentController url_prefix = '{}/departments'.format(BaseBlueprint.base_url_prefix) department_blueprint = Blueprint('department', __name__, url_prefix=url_prefix) department_controller = DepartmentController(request) @department_blueprint.route('/', methods=['GET']) @Auth.has_permission('view_department') @swag_from('documentation/get_all_departments.yml') def list_departments(): return department_controller.list_departments() @department_blueprint.route('/<int:department_id>', methods=['GET']) @Auth.has_permission('view_department') @swag_from('documentation/get_single_department.yml') def get_department(department_id): return department_controller.get_department(department_id) @department_blueprint.route('/', methods=['POST']) @Auth.has_role('admin') @Security.validator(['name|required:ifExists_Department_name', 'description|required']) @swag_from('documentation/create_department.yml') def create_department(): return department_controller.create_department() @department_blueprint.route('/<int:department_id>', methods=['DELETE']) @Auth.has_role('admin') @swag_from('documentation/delete_department.yml') def delete_department(department_id): return department_controller.delete_department(department_id) @department_blueprint.route('/<int:department_id>', methods=['PATCH']) @Auth.has_role('admin') @Security.validator(['name|optional', 'description|optional']) @swag_from('documentation/update_department.yml') def update_department(department_id): return department_controller.update_department(department_id)
2.203125
2
src/train_DFCAN.py
ikecoglu/DL-SR
46
153
<reponame>ikecoglu/DL-SR import argparse from keras import optimizers import matplotlib.pyplot as plt import numpy as np import datetime from keras.callbacks import TensorBoard import glob import os import tensorflow as tf from models import * from utils.lr_controller import ReduceLROnPlateau from utils.data_loader import data_loader, data_loader_multi_channel from utils.utils import img_comp from utils.loss import loss_mse_ssim parser = argparse.ArgumentParser() parser.add_argument("--gpu_id", type=int, default=1) parser.add_argument("--gpu_memory_fraction", type=float, default=0.3) parser.add_argument("--mixed_precision_training", type=int, default=1) parser.add_argument("--data_dir", type=str, default="../dataset/train/F-actin") parser.add_argument("--save_weights_dir", type=str, default="../trained_models") parser.add_argument("--model_name", type=str, default="DFCAN") parser.add_argument("--patch_height", type=int, default=128) parser.add_argument("--patch_width", type=int, default=128) parser.add_argument("--input_channels", type=int, default=9) parser.add_argument("--scale_factor", type=int, default=2) parser.add_argument("--norm_flag", type=int, default=1) parser.add_argument("--iterations", type=int, default=1000000) parser.add_argument("--sample_interval", type=int, default=1000) parser.add_argument("--validate_interval", type=int, default=2000) parser.add_argument("--validate_num", type=int, default=500) parser.add_argument("--batch_size", type=int, default=4) parser.add_argument("--start_lr", type=float, default=1e-4) parser.add_argument("--lr_decay_factor", type=float, default=0.5) parser.add_argument("--load_weights", type=int, default=0) parser.add_argument("--optimizer_name", type=str, default="adam") args = parser.parse_args() gpu_id = str(args.gpu_id) gpu_memory_fraction = args.gpu_memory_fraction mixed_precision_training = str(args.mixed_precision_training) data_dir = args.data_dir save_weights_dir = args.save_weights_dir validate_interval = args.validate_interval batch_size = args.batch_size start_lr = args.start_lr lr_decay_factor = args.lr_decay_factor patch_height = args.patch_height patch_width = args.patch_width input_channels = args.input_channels scale_factor = args.scale_factor norm_flag = args.norm_flag validate_num = args.validate_num iterations = args.iterations load_weights = args.load_weights optimizer_name = args.optimizer_name model_name = args.model_name sample_interval = args.sample_interval os.environ["TF_ENABLE_AUTO_MIXED_PRECISION"] = mixed_precision_training os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction) tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) data_name = data_dir.split('/')[-1] if input_channels == 1: save_weights_name = model_name + '-SISR_' + data_name cur_data_loader = data_loader train_images_path = data_dir + '/training_wf/' validate_images_path = data_dir + '/validate_wf/' else: save_weights_name = model_name + '-SIM_' + data_name cur_data_loader = data_loader_multi_channel train_images_path = data_dir + '/training/' validate_images_path = data_dir + '/validate/' save_weights_path = save_weights_dir + '/' + save_weights_name + '/' train_gt_path = data_dir + '/training_gt/' validate_gt_path = data_dir + '/validate_gt/' sample_path = save_weights_path + 'sampled_img/' if not os.path.exists(save_weights_path): os.mkdir(save_weights_path) if not os.path.exists(sample_path): os.mkdir(sample_path) # 
-------------------------------------------------------------------------------- # select models and optimizer # -------------------------------------------------------------------------------- modelFns = {'DFCAN': DFCAN16.DFCAN} modelFN = modelFns[model_name] optimizer_g = optimizers.adam(lr=start_lr, beta_1=0.9, beta_2=0.999) # -------------------------------------------------------------------------------- # define combined model # -------------------------------------------------------------------------------- g = modelFN((patch_height, patch_width, input_channels)) g.compile(loss=loss_mse_ssim, optimizer=optimizer_g) lr_controller = ReduceLROnPlateau(model=g, factor=lr_decay_factor, patience=10, mode='min', min_delta=1e-4, cooldown=0, min_lr=start_lr * 0.1, verbose=1) # -------------------------------------------------------------------------------- # about Tensorboard # -------------------------------------------------------------------------------- log_path = save_weights_path + 'graph' if not os.path.exists(log_path): os.mkdir(log_path) callback = TensorBoard(log_path) callback.set_model(g) train_names = 'training_loss' val_names = ['val_MSE', 'val_SSIM', 'val_PSNR', 'val_NRMSE'] def write_log(callback, names, logs, batch_no): summary = tf.Summary() summary_value = summary.value.add() summary_value.simple_value = logs summary_value.tag = names callback.writer.add_summary(summary, batch_no) callback.writer.flush() # -------------------------------------------------------------------------------- # Sample and validate # -------------------------------------------------------------------------------- def Validate(iter, sample=0): validate_path = glob.glob(validate_images_path + '*') validate_path.sort() if sample == 1: r, c = 3, 3 mses, nrmses, psnrs, ssims = [], [], [], [] img_show, gt_show, output_show = [], [], [] validate_path = np.random.choice(validate_path, size=r) for path in validate_path: [img, gt] = cur_data_loader([path], validate_images_path, validate_gt_path, patch_height, patch_width, 1, norm_flag=norm_flag, scale=scale_factor) output = np.squeeze(g.predict(img)) mses, nrmses, psnrs, ssims = img_comp(gt, output, mses, nrmses, psnrs, ssims) img_show.append(np.squeeze(np.mean(img, 3))) gt_show.append(np.squeeze(gt)) output_show.append(output) # show some examples fig, axs = plt.subplots(r, c) cnt = 0 for row in range(r): axs[row, 1].set_title('MSE=%.4f, SSIM=%.4f, PSNR=%.4f' % (mses[row], ssims[row], psnrs[row])) for col, image in enumerate([img_show, output_show, gt_show]): axs[row, col].imshow(np.squeeze(image[row])) axs[row, col].axis('off') cnt += 1 fig.savefig(sample_path + '%d.png' % iter) plt.close() else: if validate_num < validate_path.__len__(): validate_path = validate_path[0:validate_num] mses, nrmses, psnrs, ssims = [], [], [], [] for path in validate_path: [img, gt] = cur_data_loader([path], validate_images_path, validate_gt_path, patch_height, patch_width, 1, norm_flag=norm_flag, scale=scale_factor) output = np.squeeze(g.predict(img)) mses, nrmses, psnrs, ssims = img_comp(gt, output, mses, nrmses, psnrs, ssims) # if best, save weights.best g.save_weights(save_weights_path + 'weights.latest') if min(validate_nrmse) > np.mean(nrmses): g.save_weights(save_weights_path + 'weights.best') validate_nrmse.append(np.mean(nrmses)) curlr = lr_controller.on_epoch_end(iter, np.mean(nrmses)) write_log(callback, val_names[0], np.mean(mses), iter) write_log(callback, val_names[1], np.mean(ssims), iter) write_log(callback, val_names[2], np.mean(psnrs), iter) 
        write_log(callback, val_names[3], np.mean(nrmses), iter)
        write_log(callback, 'lr', curlr, iter)


# --------------------------------------------------------------------------------
#                             if exist, load weights
# --------------------------------------------------------------------------------
if load_weights:
    if os.path.exists(save_weights_path + 'weights.best'):
        g.load_weights(save_weights_path + 'weights.best')
        print('Loading weights successfully: ' + save_weights_path + 'weights.best')
    elif os.path.exists(save_weights_path + 'weights.latest'):
        g.load_weights(save_weights_path + 'weights.latest')
        print('Loading weights successfully: ' + save_weights_path + 'weights.latest')

# --------------------------------------------------------------------------------
#                                    training
# --------------------------------------------------------------------------------
start_time = datetime.datetime.now()
loss_record = []
validate_nrmse = [np.Inf]
lr_controller.on_train_begin()
images_path = glob.glob(train_images_path + '/*')
for it in range(iterations):
    # ------------------------------------
    #         train generator
    # ------------------------------------
    input_g, gt_g = cur_data_loader(images_path, train_images_path, train_gt_path, patch_height,
                                    patch_width, batch_size, norm_flag=norm_flag, scale=scale_factor)
    loss_generator = g.train_on_batch(input_g, gt_g)
    loss_record.append(loss_generator)
    elapsed_time = datetime.datetime.now() - start_time
    print("%d epoch: time: %s, g_loss = %s" % (it + 1, elapsed_time, loss_generator))

    if (it + 1) % sample_interval == 0:
        images_path = glob.glob(train_images_path + '/*')
        Validate(it + 1, sample=1)

    if (it + 1) % validate_interval == 0:
        Validate(it + 1, sample=0)
        write_log(callback, train_names, np.mean(loss_record), it + 1)
        loss_record = []
1.875
2
catalyst/exchange/live_graph_clock.py
erlendve/catalyst
0
154
<filename>catalyst/exchange/live_graph_clock.py import pandas as pd from catalyst.constants import LOG_LEVEL from catalyst.exchange.utils.stats_utils import prepare_stats from catalyst.gens.sim_engine import ( BAR, SESSION_START ) from logbook import Logger log = Logger('LiveGraphClock', level=LOG_LEVEL) class LiveGraphClock(object): """Realtime clock for live trading. This class is a drop-in replacement for :class:`zipline.gens.sim_engine.MinuteSimulationClock`. This mixes the clock with a live graph. Notes ----- This seemingly awkward approach allows us to run the program using a single thread. This is important because Matplotlib does not play nice with multi-threaded environments. Zipline probably does not either. Matplotlib has a pause() method which is a wrapper around time.sleep() used in the SimpleClock. The key difference is that users can still interact with the chart during the pause cycles. This is what enables us to keep a single thread. This is also why we are not using the 'animate' callback of Matplotlib. We need to direct access to the __iter__ method in order to yield events to Zipline. The :param:`time_skew` parameter represents the time difference between the exchange and the live trading machine's clock. It's not used currently. """ def __init__(self, sessions, context, callback=None, time_skew=pd.Timedelta('0s')): self.sessions = sessions self.time_skew = time_skew self._last_emit = None self._before_trading_start_bar_yielded = True self.context = context self.callback = callback def __iter__(self): from matplotlib import pyplot as plt yield pd.Timestamp.utcnow(), SESSION_START while True: current_time = pd.Timestamp.utcnow() current_minute = current_time.floor('1T') if self._last_emit is None or current_minute > self._last_emit: log.debug('emitting minutely bar: {}'.format(current_minute)) self._last_emit = current_minute yield current_minute, BAR recorded_cols = list(self.context.recorded_vars.keys()) df, _ = prepare_stats( self.context.frame_stats, recorded_cols=recorded_cols ) self.callback(self.context, df) else: # I can't use the "animate" reactive approach here because # I need to yield from the main loop. # Workaround: https://stackoverflow.com/a/33050617/814633 plt.pause(1)
2.703125
3
invera/api/tests.py
LeoLeiva/todo-challenge
0
155
<filename>invera/api/tests.py # -*- coding: utf-8 -*- from __future__ import unicode_literals import inspect from task.models import InveraTask from api.utils import send_test_csv_report from django.contrib.auth.models import User from rest_framework.test import APIClient, APITestCase from rest_framework.reverse import reverse from rest_framework import status TEST_RESULTS = [] RECIPIENTS = ['<EMAIL>'] class TaskListTestCase(APITestCase): def setUp(self) -> None: self.user = User.objects.create_user( username='test_user', password='<PASSWORD>') self.other_user = User.objects.create_user( username='other_user', password='<PASSWORD>') self.task = InveraTask.objects.create( userTask=self.user, title='My Initial Task') self.client = APIClient() @classmethod def tearDownClass(cls): User.objects.filter(username__in=['test_user', 'other_user']).delete() def test_create_task_with_un_authenticate_user(self): """ En este caso de prueba, estamos probando la API Task Create utilizando un usuario no autenticado. """ response = self.client.post( reverse('api-task'), {'title': 'My Task 1'}, format='json') is_passed = response.status_code == status.HTTP_403_FORBIDDEN TEST_RESULTS.append({ "result": "Passed" if is_passed else "Failed", "test_name": inspect.currentframe().f_code.co_name, "test_description": "El usuario no autenticado no puede agregar una tarea a la lista" }) if is_passed: print("Resultado: Aprobado") else: print("Resultado: Fallido") print("Nombre del test: " + inspect.currentframe().f_code.co_name) print("Descripcion: El usuario no autenticado no puede agregar una tarea a la lista") print("-----------") def test_put_task_with_un_authenticate_user(self): """ En este caso de prueba, estamos probando la API Task PUT utilizando un usuario no autenticado. """ response = self.client.put( reverse('api-task'), {'title': 'My Task'}, format='json') is_passed = response.status_code == status.HTTP_403_FORBIDDEN TEST_RESULTS.append({ "result": "Passed" if is_passed else "Failed", "test_name": inspect.currentframe().f_code.co_name, "test_description": "El usuario no autenticado no puede modificar una tarea" }) if is_passed: print("Resultado: Aprobado") else: print("Resultado: Fallido") print("Nombre del test: " + inspect.currentframe().f_code.co_name) print("Descripcion: El usuario no autenticado no puede modificar una tarea") print("-----------") def test_put_task_with_authenticated_user(self): self.client.login(username='test_user', password='<PASSWORD>') response = self.client.put(reverse('api-task-detail', args=[str(self.task.idTask)]), {'title': 'My Task 2'}, format='json') is_passed = response.status_code == status.HTTP_200_OK TEST_RESULTS.append({ "result": "Passed" if is_passed else "Failed", "test_name": inspect.currentframe().f_code.co_name, "test_description": "Usuario autenticado puede modificar una tarea suya" }) if is_passed: print("Resultado: Aprobado") else: print("Resultado: Fallido") print("Nombre del test: " + inspect.currentframe().f_code.co_name) print("Descripcion: Usuario autenticado puede modificar una tarea suya") print("-----------") def test_get_other_user_task_detail(self): """ En este caso de prueba, estamos probando la API Task GET y tratando de obtener detalles de la tarea de un usuario que usa credenciales de usuario diferentes. 
""" self.client.login(username='other_user', password='<PASSWORD>') response = self.client.get(reverse('api-task-detail', args=[str(self.task.idTask)])) is_passed = response.status_code == status.HTTP_404_NOT_FOUND # is_passed = response.status_code == status.HTTP_403_FORBIDDEN TEST_RESULTS.append({ "result": "Passed" if is_passed else "Failed", "test_name": inspect.currentframe().f_code.co_name, "test_description": "Solo el propietario puede ver el detalle de la tarea" }) if is_passed: print("Resultado: Aprobado") else: print("Resultado: Fallido") print("Nombre del test: " + inspect.currentframe().f_code.co_name) print("Descripcion: Solo el propietario puede ver el detalle de la tarea") print("-----------") def test_create_task_with_authenticated_user(self): self.client.login(username='test_user', password='<PASSWORD>') response = self.client.post(reverse('api-task'), {'title': 'My Task'}, format='json') is_passed = response.status_code == status.HTTP_201_CREATED TEST_RESULTS.append({ "result": "Passed" if is_passed else "Failed", "test_name": inspect.currentframe().f_code.co_name, "test_description": "Usuario autenticado agrega tarea a la lista" }) if is_passed: print("Resultado: Aprobado") else: print("Resultado: Fallido") print("Nombre del test: " + inspect.currentframe().f_code.co_name) print("Descripcion: Usuario autenticado agrega tarea a la lista") print("-----------") def test_get_task_detail(self): self.client.login(username='test_user', password='<PASSWORD>') response = self.client.get(reverse('api-task-detail', args=[str(self.task.idTask)])) is_passed = response.status_code == status.HTTP_200_OK TEST_RESULTS.append({ "result": "Passed" if is_passed else "Failed", "test_name": inspect.currentframe().f_code.co_name, "test_description": "Usuario autenticado puede ver detalles de la tarea correctamente" }) if is_passed: print("Resultado: Aprobado") else: print("Resultado: Fallido") print("Nombre del test: " + inspect.currentframe().f_code.co_name) print("Descripcion: Usuario autenticado puede ver detalles de la tarea correctamente") print("-----------") class CSVReportTest(APITestCase): def test_send_csv(self): send_test_csv_report( test_results=TEST_RESULTS, recipients=RECIPIENTS )
2.28125
2
experiments/seidel-2d/tmp_files/6745.py
LoopTilingBenchmark/benchmark
0
156
from chill import * source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/seidel-2d/kernel.c') destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/seidel-2d/tmp_files/6745.c') procedure('kernel_seidel_2d') loop(0) known(' n > 2 ') tile(0,2,16,2) tile(0,4,16,4)
1.820313
2
baymax/api.py
dmrz/baymax
34
157
import json import aiohttp async def request(url, payload=None, params=None, headers=None): headers = {'content-type': 'application/json', **(headers or {})} data = payload and json.dumps(payload) async with aiohttp.ClientSession() as client: async with client.post( url, data=data, params=params, headers=headers) as resp: # TODO: Check response status json_response = await resp.json() return json_response async def get_updates(base_url, timeout, offset): params = { 'timeout': timeout, 'offset': offset } return await request(f'{base_url}/getUpdates', params=params) async def send_message(base_url, chat_id, text, reply_markup=None): payload = { 'chat_id': chat_id, 'text': text } if reply_markup is not None: payload['reply_markup'] = reply_markup return await request(f'{base_url}/sendMessage', payload) async def answer_callback_query( base_url, callback_query_id, text, show_alert, url=None, cache_time=None): payload = { 'callback_query_id': callback_query_id, 'text': text, 'show_alert': show_alert } if url is not None: payload['url'] = url if cache_time is not None: payload['cache_time'] = cache_time return await request(f'{base_url}/answerCallbackQuery', payload)
2.5625
3
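These helpers follow the Telegram Bot API naming (getUpdates, sendMessage, answerCallbackQuery); a hedged polling sketch is shown below. The base URL form and the 'result'/'message' response keys are the usual Telegram shapes, and the token is a placeholder, not a real credential.

# Sketch of a polling loop built on the helpers above; assumes a Telegram-style
# response shape ({'result': [...]}) and uses a placeholder token.
import asyncio

async def main():
    base_url = 'https://api.telegram.org/bot<TOKEN>'
    updates = await get_updates(base_url, timeout=30, offset=0)
    for update in updates.get('result', []):
        message = update.get('message')
        if message:
            await send_message(base_url, message['chat']['id'], 'hello from baymax')

asyncio.run(main())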
nautobot_device_onboarding/tests/test_netdev_keeper.py
pszulczewski/nautobot-plugin-device-onboarding
13
158
"""Unit tests for nautobot_device_onboarding.netdev_keeper module and its classes. (c) 2020-2021 Network To Code Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from socket import gaierror from unittest import mock from django.test import TestCase from nautobot.dcim.models import Site, DeviceRole, Platform from nautobot_device_onboarding.exceptions import OnboardException from nautobot_device_onboarding.helpers import onboarding_task_fqdn_to_ip from nautobot_device_onboarding.models import OnboardingTask class NetdevKeeperTestCase(TestCase): """Test the NetdevKeeper Class.""" def setUp(self): """Create a superuser and token for API calls.""" self.site1 = Site.objects.create(name="USWEST", slug="uswest") self.device_role1 = DeviceRole.objects.create(name="Firewall", slug="firewall") self.platform1 = Platform.objects.create(name="JunOS", slug="junos", napalm_driver="junos") # self.platform2 = Platform.objects.create(name="Cisco NX-OS", slug="cisco-nx-os") self.onboarding_task4 = OnboardingTask.objects.create( ip_address="ntc123.local", site=self.site1, role=self.device_role1, platform=self.platform1 ) self.onboarding_task5 = OnboardingTask.objects.create( ip_address="bad.local", site=self.site1, role=self.device_role1, platform=self.platform1 ) self.onboarding_task7 = OnboardingTask.objects.create( ip_address="192.0.2.1/32", site=self.site1, role=self.device_role1, platform=self.platform1 ) @mock.patch("nautobot_device_onboarding.helpers.socket.gethostbyname") def test_check_ip(self, mock_get_hostbyname): """Check DNS to IP address.""" # Look up response value mock_get_hostbyname.return_value = "192.0.2.1" # FQDN -> IP onboarding_task_fqdn_to_ip(ot=self.onboarding_task4) # Run the check to change the IP address self.assertEqual(self.onboarding_task4.ip_address, "192.0.2.1") @mock.patch("nautobot_device_onboarding.helpers.socket.gethostbyname") def test_failed_check_ip(self, mock_get_hostbyname): """Check DNS to IP address failing.""" # Look up a failed response mock_get_hostbyname.side_effect = gaierror(8) # Check for bad.local raising an exception with self.assertRaises(OnboardException) as exc_info: onboarding_task_fqdn_to_ip(ot=self.onboarding_task5) self.assertEqual(exc_info.exception.message, "ERROR failed to complete DNS lookup: bad.local") self.assertEqual(exc_info.exception.reason, "fail-dns") # Check for exception with prefix address entered with self.assertRaises(OnboardException) as exc_info: onboarding_task_fqdn_to_ip(ot=self.onboarding_task7) self.assertEqual(exc_info.exception.reason, "fail-prefix") self.assertEqual(exc_info.exception.message, "ERROR appears a prefix was entered: 192.0.2.1/32")
2.140625
2
superneurons/tools/img_val/main.py
Phaeton-lang/baselines
0
159
<reponame>Phaeton-lang/baselines<filename>superneurons/tools/img_val/main.py<gh_stars>0 # Created by ay27 at 17/4/9 import os import matplotlib.pyplot as plt import struct import numpy as np def trans(row): return list(map(lambda x: np.uint8(x), row)) def read_image(filename): with open(filename, mode='rb') as file: n = file.read(8) n = struct.unpack("<Q", n)[0] c = file.read(8) c = struct.unpack("<Q", c)[0] h = file.read(8) h = struct.unpack("<Q", h)[0] w = file.read(8) w = struct.unpack("<Q", w)[0] print(n, c, h, w) for ii in range(n): r = trans(file.read(h*w)) g = trans(file.read(h*w)) b = trans(file.read(h*w)) if ii == 100: break print(file.tell() == os.fstat(file.fileno()).st_size) img = np.array([r,g,b]).transpose(1,0).reshape(h,w,c) print(img.shape) plt.imshow(img) plt.show() def read_label(path, ground_truth=None): with open(path, 'rb') as file: n = file.read(8) n = struct.unpack("<Q", n)[0] c = file.read(8) c = struct.unpack("<Q", c)[0] h = file.read(8) h = struct.unpack("<Q", h)[0] w = file.read(8) w = struct.unpack("<Q", w)[0] print(n, c, h, w) label = [] sets = set() while not (file.tell() == os.fstat(file.fileno()).st_size): ch = file.read(4) num = struct.unpack("<l", ch)[0] label.append(num) sets.add(num) # print(file.tell() == os.fstat(file.fileno()).st_size) print(label) print(len(label)) # print(label[900],label[901], label[902], label[903], label[904]) return label # if ground_truth: # g = [] # with open(ground_truth) as file: # for line in file: # g.append(int(line.split(' ')[1])) # np.testing.assert_array_equal(g, label) if __name__ == '__main__': # read_image('../../data/ilsvrc2012/img.bin') # read_label('../../data/ilsvrc2012/label.bin', '../../data/ilsvrc2012/val.txt') # read_image('../../build/cifar100_train_image.bin') # read_label('../../build/cifar100_train_label.bin') read_image('../../build/val_data_8.bin') for i in range(10): read_label('../../build/val_label_%d.bin' % i) # labels = [] # for i in range(10): # labels.append(read_label('../../build/val_label_%d.bin' % i)) # # ground = [] # with open('../../build/shuffled_list') as file: # ground.append()
2.28125
2
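Both readers above expect a 32-byte header of four little-endian uint64 values (n, c, h, w) followed by the payload, with labels stored as little-endian int32. A tiny companion writer that produces a label file read_label can parse (the file name is arbitrary):

# Companion sketch: write a minimal label file in the layout read_label expects
# (four little-endian uint64 header fields, then int32 labels until EOF).
import struct

with open('toy_label.bin', 'wb') as f:
    n, c, h, w = 4, 1, 1, 1
    f.write(struct.pack('<QQQQ', n, c, h, w))
    for label in (0, 1, 2, 3):
        f.write(struct.pack('<l', label))

read_label('toy_label.bin')  # prints the header fields and the list [0, 1, 2, 3]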
pymatgen/analysis/tests/test_piezo.py
exenGT/pymatgen
1
160
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

"""
Test for the piezo tensor class
"""

__author__ = "<NAME>"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "4/1/16"

import os
import unittest

import numpy as np

from pymatgen.analysis.piezo import PiezoTensor
from pymatgen.util.testing import PymatgenTest


class PiezoTest(PymatgenTest):
    def setUp(self):
        self.piezo_struc = self.get_structure("BaNiO3")
        self.voigt_matrix = np.array(
            [
                [0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],
                [0.0, 0.0, 0.0, 0.03839, 0.0, 0.0],
                [6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
            ]
        )
        self.vasp_matrix = np.array(
            [
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.03839],
                [0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],
                [6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
            ]
        )
        self.full_tensor_array = [
            [[0.0, 0.0, 0.03839], [0.0, 0.0, 0.0], [0.03839, 0.0, 0.0]],
            [[0.0, 0.0, 0.0], [0.0, 0.0, 0.03839], [0.0, 0.03839, 0.0]],
            [[6.89822, 0.0, 0.0], [0.0, 6.89822, 0.0], [0.0, 0.0, 27.4628]],
        ]

    def test_new(self):
        pt = PiezoTensor(self.full_tensor_array)
        self.assertArrayAlmostEqual(pt, self.full_tensor_array)
        bad_dim_array = np.zeros((3, 3))
        self.assertRaises(ValueError, PiezoTensor, bad_dim_array)

    def test_from_voigt(self):
        bad_voigt = np.zeros((3, 7))
        pt = PiezoTensor.from_voigt(self.voigt_matrix)
        self.assertArrayEqual(pt, self.full_tensor_array)
        self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
        self.assertArrayEqual(self.voigt_matrix, pt.voigt)

    def test_from_vasp_voigt(self):
        bad_voigt = np.zeros((3, 7))
        pt = PiezoTensor.from_vasp_voigt(self.vasp_matrix)
        self.assertArrayEqual(pt, self.full_tensor_array)
        self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
        self.assertArrayEqual(self.voigt_matrix, pt.voigt)


if __name__ == "__main__":
    unittest.main()
2.140625
2
nova/virt/driver.py
larsbutler/nova
0
161
<filename>nova/virt/driver.py # Copyright 2011 <NAME> # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Driver base-classes: (Beginning of) the contract that compute drivers must follow, and shared types that support that contract """ import sys from oslo_log import log as logging from oslo_utils import importutils import nova.conf from nova.i18n import _, _LE, _LI from nova import utils from nova.virt import event as virtevent CONF = nova.conf.CONF LOG = logging.getLogger(__name__) def driver_dict_from_config(named_driver_config, *args, **kwargs): driver_registry = dict() for driver_str in named_driver_config: driver_type, _sep, driver = driver_str.partition('=') driver_class = importutils.import_class(driver) driver_registry[driver_type] = driver_class(*args, **kwargs) return driver_registry def get_block_device_info(instance, block_device_mapping): """Converts block device mappings for an instance to driver format. Virt drivers expect block device mapping to be presented in the format of a dict containing the following keys: - root_device_name: device name of the root disk - ephemerals: a (potentially empty) list of DriverEphemeralBlockDevice instances - swap: An instance of DriverSwapBlockDevice or None - block_device_mapping: a (potentially empty) list of DriverVolumeBlockDevice or any of it's more specialized subclasses. """ from nova.virt import block_device as virt_block_device block_device_info = { 'root_device_name': instance.root_device_name, 'ephemerals': virt_block_device.convert_ephemerals( block_device_mapping), 'block_device_mapping': virt_block_device.convert_all_volumes(*block_device_mapping) } swap_list = virt_block_device.convert_swap(block_device_mapping) block_device_info['swap'] = virt_block_device.get_swap(swap_list) return block_device_info def block_device_info_get_root(block_device_info): block_device_info = block_device_info or {} return block_device_info.get('root_device_name') def block_device_info_get_swap(block_device_info): block_device_info = block_device_info or {} return block_device_info.get('swap') or {'device_name': None, 'swap_size': 0} def swap_is_usable(swap): return swap and swap['device_name'] and swap['swap_size'] > 0 def block_device_info_get_ephemerals(block_device_info): block_device_info = block_device_info or {} ephemerals = block_device_info.get('ephemerals') or [] return ephemerals def block_device_info_get_mapping(block_device_info): block_device_info = block_device_info or {} block_device_mapping = block_device_info.get('block_device_mapping') or [] return block_device_mapping class ComputeDriver(object): """Base class for compute drivers. The interface to this class talks in terms of 'instances' (Amazon EC2 and internal Nova terminology), by which we mean 'running virtual machine' (XenAPI terminology) or domain (Xen or libvirt terminology). An instance has an ID, which is the identifier chosen by Nova to represent the instance further up the stack. This is unfortunately also called a 'name' elsewhere. 
As far as this layer is concerned, 'instance ID' and 'instance name' are synonyms. Note that the instance ID or name is not human-readable or customer-controlled -- it's an internal ID chosen by Nova. At the nova.virt layer, instances do not have human-readable names at all -- such things are only known higher up the stack. Most virtualization platforms will also have their own identity schemes, to uniquely identify a VM or domain. These IDs must stay internal to the platform-specific layer, and never escape the connection interface. The platform-specific layer is responsible for keeping track of which instance ID maps to which platform-specific ID, and vice versa. Some methods here take an instance of nova.compute.service.Instance. This is the data structure used by nova.compute to store details regarding an instance, and pass them into this layer. This layer is responsible for translating that generic data structure into terms that are specific to the virtualization platform. """ capabilities = { "has_imagecache": False, "supports_recreate": False, "supports_migrate_to_same_host": False, "supports_attach_interface": False, "supports_device_tagging": False, } def __init__(self, virtapi): self.virtapi = virtapi self._compute_event_callback = None def init_host(self, host): """Initialize anything that is necessary for the driver to function, including catching up with currently running VM's on the given host. """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def cleanup_host(self, host): """Clean up anything that is necessary for the driver gracefully stop, including ending remote sessions. This is optional. """ pass def get_info(self, instance): """Get the current status of an instance, by name (not ID!) :param instance: nova.objects.instance.Instance object Returns a InstanceInfo object """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_num_instances(self): """Return the total number of virtual machines. Return the number of virtual machines that the hypervisor knows about. .. note:: This implementation works for all drivers, but it is not particularly efficient. Maintainers of the virt drivers are encouraged to override this method with something more efficient. """ return len(self.list_instances()) def instance_exists(self, instance): """Checks existence of an instance on the host. :param instance: The instance to lookup Returns True if an instance with the supplied ID exists on the host, False otherwise. .. note:: This implementation works for all drivers, but it is not particularly efficient. Maintainers of the virt drivers are encouraged to override this method with something more efficient. """ try: return instance.uuid in self.list_instance_uuids() except NotImplementedError: return instance.name in self.list_instances() def estimate_instance_overhead(self, instance_info): """Estimate the virtualization overhead required to build an instance of the given flavor. Defaults to zero, drivers should override if per-instance overhead calculations are desired. :param instance_info: Instance/flavor to calculate overhead for. :returns: Dict of estimated overhead values. """ return {'memory_mb': 0, 'disk_gb': 0} def list_instances(self): """Return the names of all the instances known to the virtualization layer, as a list. 
""" # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def list_instance_uuids(self): """Return the UUIDS of all the instances known to the virtualization layer, as a list. """ raise NotImplementedError() def rebuild(self, context, instance, image_meta, injected_files, admin_password, bdms, detach_block_devices, attach_block_devices, network_info=None, recreate=False, block_device_info=None, preserve_ephemeral=False): """Destroy and re-make this instance. A 'rebuild' effectively purges all existing data from the system and remakes the VM with given 'metadata' and 'personalities'. This base class method shuts down the VM, detaches all block devices, then spins up the new VM afterwards. It may be overridden by hypervisors that need to - e.g. for optimisations, or when the 'VM' is actually proxied and needs to be held across the shutdown + spin up steps. :param context: security context :param instance: nova.objects.instance.Instance This function should use the data there to guide the creation of the new instance. :param nova.objects.ImageMeta image_meta: The metadata of the image of the instance. :param injected_files: User files to inject into instance. :param admin_password: <PASSWORD> to set in instance. :param bdms: block-device-mappings to use for rebuild :param detach_block_devices: function to detach block devices. See nova.compute.manager.ComputeManager:_rebuild_default_impl for usage. :param attach_block_devices: function to attach block devices. See nova.compute.manager.ComputeManager:_rebuild_default_impl for usage. :param network_info: instance network information :param recreate: True if the instance is being recreated on a new hypervisor - all the cleanup of old state is skipped. :param block_device_info: Information about block devices to be attached to the instance. :param preserve_ephemeral: True if the default ephemeral storage partition must be preserved on rebuild """ raise NotImplementedError() def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): """Create a new instance/VM/domain on the virtualization platform. Once this successfully completes, the instance should be running (power_state.RUNNING). If this fails, any partial instance should be completely cleaned up, and the virtualization platform should be in the state that it was before this call began. :param context: security context :param instance: nova.objects.instance.Instance This function should use the data there to guide the creation of the new instance. :param nova.objects.ImageMeta image_meta: The metadata of the image of the instance. :param injected_files: User files to inject into instance. :param admin_password: Administrator password to set in instance. :param network_info: instance network information :param block_device_info: Information about block devices to be attached to the instance. """ raise NotImplementedError() def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True, migrate_data=None): """Destroy the specified instance from the Hypervisor. If the instance is not found (for example if networking failed), this function should still succeed. It's probably a good idea to log a warning in that case. :param context: security context :param instance: Instance object as returned by DB layer. :param network_info: instance network information :param block_device_info: Information about block devices that should be detached from the instance. 
:param destroy_disks: Indicates if disks should be destroyed :param migrate_data: implementation specific params """ raise NotImplementedError() def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True, migrate_data=None, destroy_vifs=True): """Cleanup the instance resources . Instance should have been destroyed from the Hypervisor before calling this method. :param context: security context :param instance: Instance object as returned by DB layer. :param network_info: instance network information :param block_device_info: Information about block devices that should be detached from the instance. :param destroy_disks: Indicates if disks should be destroyed :param migrate_data: implementation specific params """ raise NotImplementedError() def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): """Reboot the specified instance. After this is called successfully, the instance's state goes back to power_state.RUNNING. The virtualization platform should ensure that the reboot action has completed successfully even in cases in which the underlying domain/vm is paused or halted/stopped. :param instance: nova.objects.instance.Instance :param network_info: instance network information :param reboot_type: Either a HARD or SOFT reboot :param block_device_info: Info pertaining to attached volumes :param bad_volumes_callback: Function to handle any bad volumes encountered """ raise NotImplementedError() def get_console_pool_info(self, console_type): # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_console_output(self, context, instance): """Get console output for an instance :param context: security context :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def get_vnc_console(self, context, instance): """Get connection info for a vnc console. :param context: security context :param instance: nova.objects.instance.Instance :returns an instance of console.type.ConsoleVNC """ raise NotImplementedError() def get_spice_console(self, context, instance): """Get connection info for a spice console. :param context: security context :param instance: nova.objects.instance.Instance :returns an instance of console.type.ConsoleSpice """ raise NotImplementedError() def get_rdp_console(self, context, instance): """Get connection info for a rdp console. :param context: security context :param instance: nova.objects.instance.Instance :returns an instance of console.type.ConsoleRDP """ raise NotImplementedError() def get_serial_console(self, context, instance): """Get connection info for a serial console. :param context: security context :param instance: nova.objects.instance.Instance :returns an instance of console.type.ConsoleSerial """ raise NotImplementedError() def get_mks_console(self, context, instance): """Get connection info for a MKS console. :param context: security context :param instance: nova.objects.instance.Instance :returns an instance of console.type.ConsoleMKS """ raise NotImplementedError() def get_diagnostics(self, instance): """Return diagnostics data about the given instance. :param nova.objects.instance.Instance instance: The instance to which the diagnostic data should be returned. 
:return: Has a big overlap to the return value of the newer interface :func:`get_instance_diagnostics` :rtype: dict """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_instance_diagnostics(self, instance): """Return diagnostics data about the given instance. :param nova.objects.instance.Instance instance: The instance to which the diagnostic data should be returned. :return: Has a big overlap to the return value of the older interface :func:`get_diagnostics` :rtype: nova.virt.diagnostics.Diagnostics """ raise NotImplementedError() def get_all_bw_counters(self, instances): """Return bandwidth usage counters for each interface on each running VM. :param instances: nova.objects.instance.InstanceList """ raise NotImplementedError() def get_all_volume_usage(self, context, compute_host_bdms): """Return usage info for volumes attached to vms on a given host.- """ raise NotImplementedError() def get_host_ip_addr(self): """Retrieves the IP address of the dom0 """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None): """Attach the disk to the instance at mountpoint using info.""" raise NotImplementedError() def detach_volume(self, connection_info, instance, mountpoint, encryption=None): """Detach the disk attached to the instance.""" raise NotImplementedError() def swap_volume(self, old_connection_info, new_connection_info, instance, mountpoint, resize_to): """Replace the volume attached to the given `instance`. :param dict old_connection_info: The volume for this connection gets detached from the given `instance`. :param dict new_connection_info: The volume for this connection gets attached to the given 'instance'. :param nova.objects.instance.Instance instance: The instance whose volume gets replaced by another one. :param str mountpoint: The mountpoint in the instance where the volume for `old_connection_info` is attached to. :param int resize_to: If the new volume is larger than the old volume, it gets resized to the given size (in Gigabyte) of `resize_to`. :return: None """ raise NotImplementedError() def attach_interface(self, instance, image_meta, vif): """Use hotplug to add a network interface to a running instance. The counter action to this is :func:`detach_interface`. :param nova.objects.instance.Instance instance: The instance which will get an additional network interface. :param nova.objects.ImageMeta image_meta: The metadata of the image of the instance. :param nova.network.model.NetworkInfo vif: The object which has the information about the interface to attach. :raise nova.exception.NovaException: If the attach fails. :return: None """ raise NotImplementedError() def detach_interface(self, instance, vif): """Use hotunplug to remove a network interface from a running instance. The counter action to this is :func:`attach_interface`. :param nova.objects.instance.Instance instance: The instance which gets a network interface removed. :param nova.network.model.NetworkInfo vif: The object which has the information about the interface to detach. :raise nova.exception.NovaException: If the detach fails. 
:return: None """ raise NotImplementedError() def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None, timeout=0, retry_interval=0): """Transfers the disk of a running instance in multiple phases, turning off the instance before the end. :param nova.objects.instance.Instance instance: The instance whose disk should be migrated. :param str dest: The IP address of the destination host. :param nova.objects.flavor.Flavor flavor: The flavor of the instance whose disk get migrated. :param nova.network.model.NetworkInfo network_info: The network information of the given `instance`. :param dict block_device_info: Information about the block devices. :param int timeout: The time in seconds to wait for the guest OS to shutdown. :param int retry_interval: How often to signal guest while waiting for it to shutdown. :return: A list of disk information dicts in JSON format. :rtype: str """ raise NotImplementedError() def snapshot(self, context, instance, image_id, update_task_state): """Snapshots the specified instance. :param context: security context :param instance: nova.objects.instance.Instance :param image_id: Reference to a pre-created image that will hold the snapshot. """ raise NotImplementedError() def post_interrupted_snapshot_cleanup(self, context, instance): """Cleans up any resources left after an interrupted snapshot. :param context: security context :param instance: nova.objects.instance.Instance """ pass def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info=None, power_on=True): """Completes a resize/migration. :param context: the context for the migration/resize :param migration: the migrate/resize information :param instance: nova.objects.instance.Instance being migrated/resized :param disk_info: the newly transferred disk information :param network_info: instance network information :param nova.objects.ImageMeta image_meta: The metadata of the image of the instance. :param resize_instance: True if the instance is being resized, False otherwise :param block_device_info: instance volume block device info :param power_on: True if the instance should be powered on, False otherwise """ raise NotImplementedError() def confirm_migration(self, migration, instance, network_info): """Confirms a resize/migration, destroying the source VM. :param instance: nova.objects.instance.Instance """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): """Finish reverting a resize/migration. :param context: the context for the finish_revert_migration :param instance: nova.objects.instance.Instance being migrated/resized :param network_info: instance network information :param block_device_info: instance volume block device info :param power_on: True if the instance should be powered on, False otherwise """ raise NotImplementedError() def pause(self, instance): """Pause the given instance. A paused instance doesn't use CPU cycles of the host anymore. The state of the VM could be stored in the memory or storage space of the host, depending on the underlying hypervisor technology. A "stronger" version of `pause` is :func:'suspend'. The counter action for `pause` is :func:`unpause`. :param nova.objects.instance.Instance instance: The instance which should be paused. 
:return: None """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def unpause(self, instance): """Unpause the given paused instance. The paused instance gets unpaused and will use CPU cycles of the host again. The counter action for 'unpause' is :func:`pause`. Depending on the underlying hypervisor technology, the guest has the same state as before the 'pause'. :param nova.objects.instance.Instance instance: The instance which should be unpaused. :return: None """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def suspend(self, context, instance): """Suspend the specified instance. A suspended instance doesn't use CPU cycles or memory of the host anymore. The state of the instance could be persisted on the host and allocate storage space this way. A "softer" way of `suspend` is :func:`pause`. The counter action for `suspend` is :func:`resume`. :param nova.context.RequestContext context: The context for the suspend. :param nova.objects.instance.Instance instance: The instance to suspend. :return: None """ raise NotImplementedError() def resume(self, context, instance, network_info, block_device_info=None): """resume the specified suspended instance. The suspended instance gets resumed and will use CPU cycles and memory of the host again. The counter action for 'resume' is :func:`suspend`. Depending on the underlying hypervisor technology, the guest has the same state as before the 'suspend'. :param nova.context.RequestContext context: The context for the resume. :param nova.objects.instance.Instance instance: The suspended instance to resume. :param nova.network.model.NetworkInfo network_info: Necessary network information for the resume. :param dict block_device_info: Instance volume block device info. :return: None """ raise NotImplementedError() def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None): """resume guest state when a host is booted. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def rescue(self, context, instance, network_info, image_meta, rescue_password): """Rescue the specified instance. :param nova.context.RequestContext context: The context for the rescue. :param nova.objects.instance.Instance instance: The instance being rescued. :param nova.network.model.NetworkInfo network_info: Necessary network information for the resume. :param nova.objects.ImageMeta image_meta: The metadata of the image of the instance. :param rescue_password: <PASSWORD> for rescue. """ raise NotImplementedError() def set_bootable(self, instance, is_bootable): """Set the ability to power on/off an instance. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def unrescue(self, instance, network_info): """Unrescue the specified instance. :param instance: nova.objects.instance.Instance """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def power_off(self, instance, timeout=0, retry_interval=0): """Power off the specified instance. :param instance: nova.objects.instance.Instance :param timeout: time to wait for GuestOS to shutdown :param retry_interval: How often to signal guest while waiting for it to shutdown """ raise NotImplementedError() def power_on(self, context, instance, network_info, block_device_info=None): """Power on the specified instance. 
:param instance: nova.objects.instance.Instance """ raise NotImplementedError() def trigger_crash_dump(self, instance): """Trigger crash dump mechanism on the given instance. Stalling instances can be triggered to dump the crash data. How the guest OS reacts in details, depends on the configuration of it. :param nova.objects.instance.Instance instance: The instance where the crash dump should be triggered. :return: None """ raise NotImplementedError() def soft_delete(self, instance): """Soft delete the specified instance. A soft-deleted instance doesn't allocate any resources anymore, but is still available as a database entry. The counter action :func:`restore` uses the database entry to create a new instance based on that. :param nova.objects.instance.Instance instance: The instance to soft-delete. :return: None """ raise NotImplementedError() def restore(self, instance): """Restore the specified soft-deleted instance. The restored instance will be automatically booted. The counter action for `restore` is :func:`soft_delete`. :param nova.objects.instance.Instance instance: The soft-deleted instance which should be restored from the soft-deleted data. :return: None """ raise NotImplementedError() def get_available_resource(self, nodename): """Retrieve resource information. This method is called when nova-compute launches, and as part of a periodic task that records the results in the DB. :param nodename: node which the caller want to get resources from a driver that manages only one node can safely ignore this :returns: Dictionary describing resources """ raise NotImplementedError() def pre_live_migration(self, context, instance, block_device_info, network_info, disk_info, migrate_data=None): """Prepare an instance for live migration :param context: security context :param instance: nova.objects.instance.Instance object :param block_device_info: instance block device information :param network_info: instance network information :param disk_info: instance disk information :param migrate_data: a LiveMigrateData object """ raise NotImplementedError() def live_migration(self, context, instance, dest, post_method, recover_method, block_migration=False, migrate_data=None): """Live migration of an instance to another host. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param dest: destination host :param post_method: post operation method. expected nova.compute.manager._post_live_migration. :param recover_method: recovery method when any exception occurs. expected nova.compute.manager._rollback_live_migration. :param block_migration: if true, migrate VM disk. :param migrate_data: a LiveMigrateData object """ raise NotImplementedError() def live_migration_force_complete(self, instance): """Force live migration to complete :param instance: Instance being live migrated """ raise NotImplementedError() def live_migration_abort(self, instance): """Abort an in-progress live migration. :param instance: instance that is live migrating """ raise NotImplementedError() def rollback_live_migration_at_destination(self, context, instance, network_info, block_device_info, destroy_disks=True, migrate_data=None): """Clean up destination node after a failed live migration. 
:param context: security context :param instance: instance object that was being migrated :param network_info: instance network information :param block_device_info: instance block device information :param destroy_disks: if true, destroy disks at destination during cleanup :param migrate_data: a LiveMigrateData object """ raise NotImplementedError() def post_live_migration(self, context, instance, block_device_info, migrate_data=None): """Post operation of live migration at source host. :param context: security context :instance: instance object that was migrated :block_device_info: instance block device information :param migrate_data: a LiveMigrateData object """ pass def post_live_migration_at_source(self, context, instance, network_info): """Unplug VIFs from networks at source. :param context: security context :param instance: instance object reference :param network_info: instance network information """ raise NotImplementedError(_("Hypervisor driver does not support " "post_live_migration_at_source method")) def post_live_migration_at_destination(self, context, instance, network_info, block_migration=False, block_device_info=None): """Post operation of live migration at destination host. :param context: security context :param instance: instance object that is migrated :param network_info: instance network information :param block_migration: if true, post operation of block_migration. """ raise NotImplementedError() def check_instance_shared_storage_local(self, context, instance): """Check if instance files located on shared storage. This runs check on the destination host, and then calls back to the source host to check the results. :param context: security context :param instance: nova.objects.instance.Instance object """ raise NotImplementedError() def check_instance_shared_storage_remote(self, context, data): """Check if instance files located on shared storage. :param context: security context :param data: result of check_instance_shared_storage_local """ raise NotImplementedError() def check_instance_shared_storage_cleanup(self, context, data): """Do cleanup on host after check_instance_shared_storage calls :param context: security context :param data: result of check_instance_shared_storage_local """ pass def check_can_live_migrate_destination(self, context, instance, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False): """Check if it is possible to execute live migration. This runs checks on the destination host, and then calls back to the source host to check the results. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param src_compute_info: Info about the sending machine :param dst_compute_info: Info about the receiving machine :param block_migration: if true, prepare for block migration :param disk_over_commit: if true, allow disk over commit :returns: a LiveMigrateData object (hypervisor-dependent) """ raise NotImplementedError() def cleanup_live_migration_destination_check(self, context, dest_check_data): """Do required cleanup on dest host after check_can_live_migrate calls :param context: security context :param dest_check_data: result of check_can_live_migrate_destination """ raise NotImplementedError() def check_can_live_migrate_source(self, context, instance, dest_check_data, block_device_info=None): """Check if it is possible to execute live migration. This checks if the live migration can succeed, based on the results from check_can_live_migrate_destination. 
:param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param dest_check_data: result of check_can_live_migrate_destination :param block_device_info: result of _get_instance_block_device_info :returns: a LiveMigrateData object """ raise NotImplementedError() def get_instance_disk_info(self, instance, block_device_info=None): """Retrieve information about actual disk sizes of an instance. :param instance: nova.objects.Instance :param block_device_info: Optional; Can be used to filter out devices which are actually volumes. :return: json strings with below format:: "[{'path':'disk', 'type':'raw', 'virt_disk_size':'10737418240', 'backing_file':'backing_file', 'disk_size':'83886080' 'over_committed_disk_size':'10737418240'}, ...]" """ raise NotImplementedError() def refresh_security_group_rules(self, security_group_id): """This method is called after a change to security groups. All security groups and their associated rules live in the datastore, and calling this method should apply the updated rules to instances running the specified security group. An error should be raised if the operation cannot complete. """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def refresh_instance_security_rules(self, instance): """Refresh security group rules Gets called when an instance gets added to or removed from the security group the instance is a member of or if the group gains or loses a rule. """ raise NotImplementedError() def reset_network(self, instance): """reset networking for specified instance.""" # TODO(Vek): Need to pass context in for access to auth_token pass def ensure_filtering_rules_for_instance(self, instance, network_info): """Setting up filtering rules and waiting for its completion. To migrate an instance, filtering rules to hypervisors and firewalls are inevitable on destination host. ( Waiting only for filtering rules to hypervisor, since filtering rules to firewall rules can be set faster). Concretely, the below method must be called. - setup_basic_filtering (for nova-basic, etc.) - prepare_instance_filter(for nova-instance-instance-xxx, etc.) to_xml may have to be called since it defines PROJNET, PROJMASK. but libvirt migrates those value through migrateToURI(), so , no need to be called. Don't use thread for this method since migration should not be started when setting-up filtering rules operations are not completed. :param instance: nova.objects.instance.Instance object """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def filter_defer_apply_on(self): """Defer application of IPTables rules.""" pass def filter_defer_apply_off(self): """Turn off deferral of IPTables rules and apply the rules now.""" pass def unfilter_instance(self, instance, network_info): """Stop filtering instance.""" # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def set_admin_password(self, instance, new_pass): """Set the root password on the specified instance. :param instance: nova.objects.instance.Instance :param new_pass: the <PASSWORD> """ raise NotImplementedError() def inject_file(self, instance, b64_path, b64_contents): """Writes a file on the specified instance. The first parameter is an instance of nova.compute.service.Instance, and so the instance is being specified as instance.name. 
The second parameter is the base64-encoded path to which the file is to be written on the instance; the third is the contents of the file, also base64-encoded. NOTE(russellb) This method is deprecated and will be removed once it can be removed from nova.compute.manager. """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def change_instance_metadata(self, context, instance, diff): """Applies a diff to the instance metadata. This is an optional driver method which is used to publish changes to the instance's metadata to the hypervisor. If the hypervisor has no means of publishing the instance metadata to the instance, then this method should not be implemented. :param context: security context :param instance: nova.objects.instance.Instance """ pass def inject_network_info(self, instance, nw_info): """inject network info for specified instance.""" # TODO(Vek): Need to pass context in for access to auth_token pass def poll_rebooting_instances(self, timeout, instances): """Perform a reboot on all given 'instances'. Reboots the given `instances` which are longer in the rebooting state than `timeout` seconds. :param int timeout: The timeout (in seconds) for considering rebooting instances to be stuck. :param list instances: A list of nova.objects.instance.Instance objects that have been in rebooting state longer than the configured timeout. :return: None """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def host_power_action(self, action): """Reboots, shuts down or powers up the host. :param str action: The action the host should perform. The valid actions are: ""startup", "shutdown" and "reboot". :return: The result of the power action :rtype: : str """ raise NotImplementedError() def host_maintenance_mode(self, host, mode): """Start/Stop host maintenance window. On start, it triggers the migration of all instances to other hosts. Consider the combination with :func:`set_host_enabled`. :param str host: The name of the host whose maintenance mode should be changed. :param bool mode: If `True`, go into maintenance mode. If `False`, leave the maintenance mode. :return: "on_maintenance" if switched to maintenance mode or "off_maintenance" if maintenance mode got left. :rtype: str """ raise NotImplementedError() def set_host_enabled(self, enabled): """Sets the ability of this host to accept new instances. :param bool enabled: If this is `True`, the host will accept new instances. If it is `False`, the host won't accept new instances. :return: If the host can accept further instances, return "enabled", if further instances shouldn't be scheduled to this host, return "disabled". :rtype: str """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_host_uptime(self): """Returns the result of calling the Linux command `uptime` on this host. :return: A text which contains the uptime of this host since the last boot. :rtype: str """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def plug_vifs(self, instance, network_info): """Plug virtual interfaces (VIFs) into the given `instance` at instance boot time. The counter action is :func:`unplug_vifs`. :param nova.objects.instance.Instance instance: The instance which gets VIFs plugged. :param nova.network.model.NetworkInfo network_info: The object which contains information about the VIFs to plug. 
:return: None """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def unplug_vifs(self, instance, network_info): # NOTE(markus_z): 2015-08-18 # The compute manager doesn't use this interface, which seems odd # since the manager should be the controlling thing here. """Unplug virtual interfaces (VIFs) from networks. The counter action is :func:`plug_vifs`. :param nova.objects.instance.Instance instance: The instance which gets VIFs unplugged. :param nova.network.model.NetworkInfo network_info: The object which contains information about the VIFs to unplug. :return: None """ raise NotImplementedError() def get_host_cpu_stats(self): """Get the currently known host CPU stats. :returns: a dict containing the CPU stat info, eg: | {'kernel': kern, | 'idle': idle, | 'user': user, | 'iowait': wait, | 'frequency': freq}, where kern and user indicate the cumulative CPU time (nanoseconds) spent by kernel and user processes respectively, idle indicates the cumulative idle CPU time (nanoseconds), wait indicates the cumulative I/O wait CPU time (nanoseconds), since the host is booting up; freq indicates the current CPU frequency (MHz). All values are long integers. """ raise NotImplementedError() def block_stats(self, instance, disk_id): """Return performance counters associated with the given disk_id on the given instance. These are returned as [rd_req, rd_bytes, wr_req, wr_bytes, errs], where rd indicates read, wr indicates write, req is the total number of I/O requests made, bytes is the total number of bytes transferred, and errs is the number of requests held up due to a full pipeline. All counters are long integers. This method is optional. On some platforms (e.g. XenAPI) performance statistics can be retrieved directly in aggregate form, without Nova having to do the aggregation. On those platforms, this method is unused. Note that this function takes an instance ID. """ raise NotImplementedError() def deallocate_networks_on_reschedule(self, instance): """Does the driver want networks deallocated on reschedule?""" return False def macs_for_instance(self, instance): """What MAC addresses must this instance have? Some hypervisors (such as bare metal) cannot do freeform virtualization of MAC addresses. This method allows drivers to return a set of MAC addresses that the instance is to have. allocate_for_instance will take this into consideration when provisioning networking for the instance. Mapping of MAC addresses to actual networks (or permitting them to be freeform) is up to the network implementation layer. For instance, with openflow switches, fixed MAC addresses can still be virtualized onto any L2 domain, with arbitrary VLANs etc, but regular switches require pre-configured MAC->network mappings that will match the actual configuration. Most hypervisors can use the default implementation which returns None. Hypervisors with MAC limits should return a set of MAC addresses, which will be supplied to the allocate_for_instance call by the compute manager, and it is up to that call to ensure that all assigned network details are compatible with the set of MAC addresses. This is called during spawn_instance by the compute manager. :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])). None means 'no constraints', a set means 'these and only these MAC addresses'. """ return None def dhcp_options_for_instance(self, instance): """Get DHCP options for this instance. 
Some hypervisors (such as bare metal) require that instances boot from the network, and manage their own TFTP service. This requires passing the appropriate options out to the DHCP service. Most hypervisors can use the default implementation which returns None. This is called during spawn_instance by the compute manager. Note that the format of the return value is specific to the Neutron client API. :return: None, or a set of DHCP options, eg: | [{'opt_name': 'bootfile-name', | 'opt_value': '/tftpboot/path/to/config'}, | {'opt_name': 'server-ip-address', | 'opt_value': '192.168.127.12'}, | {'opt_name': 'tftp-server', | 'opt_value': '192.168.127.12'} | ] """ return None def manage_image_cache(self, context, all_instances): """Manage the driver's local image cache. Some drivers chose to cache images for instances on disk. This method is an opportunity to do management of that cache which isn't directly related to other calls into the driver. The prime example is to clean the cache and remove images which are no longer of interest. :param all_instances: nova.objects.instance.InstanceList """ pass def add_to_aggregate(self, context, aggregate, host, **kwargs): """Add a compute host to an aggregate. The counter action to this is :func:`remove_from_aggregate` :param nova.context.RequestContext context: The security context. :param nova.objects.aggregate.Aggregate aggregate: The aggregate which should add the given `host` :param str host: The name of the host to add to the given `aggregate`. :param dict kwargs: A free-form thingy... :return: None """ # NOTE(jogo) Currently only used for XenAPI-Pool raise NotImplementedError() def remove_from_aggregate(self, context, aggregate, host, **kwargs): """Remove a compute host from an aggregate. The counter action to this is :func:`add_to_aggregate` :param nova.context.RequestContext context: The security context. :param nova.objects.aggregate.Aggregate aggregate: The aggregate which should remove the given `host` :param str host: The name of the host to remove from the given `aggregate`. :param dict kwargs: A free-form thingy... :return: None """ raise NotImplementedError() def undo_aggregate_operation(self, context, op, aggregate, host, set_error=True): """Undo for Resource Pools.""" raise NotImplementedError() def get_volume_connector(self, instance): """Get connector information for the instance for attaching to volumes. Connector information is a dictionary representing the ip of the machine that will be making the connection, the name of the iscsi initiator and the hostname of the machine as follows:: { 'ip': ip, 'initiator': initiator, 'host': hostname } """ raise NotImplementedError() def get_available_nodes(self, refresh=False): """Returns nodenames of all nodes managed by the compute service. This method is for multi compute-nodes support. If a driver supports multi compute-nodes, this method returns a list of nodenames managed by the service. Otherwise, this method should return [hypervisor_hostname]. """ raise NotImplementedError() def node_is_available(self, nodename): """Return whether this compute service manages a particular node.""" if nodename in self.get_available_nodes(): return True # Refresh and check again. return nodename in self.get_available_nodes(refresh=True) def get_per_instance_usage(self): """Get information about instance resource usage. :returns: dict of nova uuid => dict of usage info """ return {} def instance_on_disk(self, instance): """Checks access of instance files on the host. 
:param instance: nova.objects.instance.Instance to lookup Returns True if files of an instance with the supplied ID accessible on the host, False otherwise. .. note:: Used in rebuild for HA implementation and required for validation of access to instance shared disk files """ return False def register_event_listener(self, callback): """Register a callback to receive events. Register a callback to receive asynchronous event notifications from hypervisors. The callback will be invoked with a single parameter, which will be an instance of the nova.virt.event.Event class. """ self._compute_event_callback = callback def emit_event(self, event): """Dispatches an event to the compute manager. Invokes the event callback registered by the compute manager to dispatch the event. This must only be invoked from a green thread. """ if not self._compute_event_callback: LOG.debug("Discarding event %s", str(event)) return if not isinstance(event, virtevent.Event): raise ValueError( _("Event must be an instance of nova.virt.event.Event")) try: LOG.debug("Emitting event %s", str(event)) self._compute_event_callback(event) except Exception as ex: LOG.error(_LE("Exception dispatching event %(event)s: %(ex)s"), {'event': event, 'ex': ex}) def delete_instance_files(self, instance): """Delete any lingering instance files for an instance. :param instance: nova.objects.instance.Instance :returns: True if the instance was deleted from disk, False otherwise. """ return True @property def need_legacy_block_device_info(self): """Tell the caller if the driver requires legacy block device info. Tell the caller whether we expect the legacy format of block device info to be passed in to methods that expect it. """ return True def volume_snapshot_create(self, context, instance, volume_id, create_info): """Snapshots volumes attached to a specified instance. The counter action to this is :func:`volume_snapshot_delete` :param nova.context.RequestContext context: The security context. :param nova.objects.instance.Instance instance: The instance that has the volume attached :param uuid volume_id: Volume to be snapshotted :param create_info: The data needed for nova to be able to attach to the volume. This is the same data format returned by Cinder's initialize_connection() API call. In the case of doing a snapshot, it is the image file Cinder expects to be used as the active disk after the snapshot operation has completed. There may be other data included as well that is needed for creating the snapshot. """ raise NotImplementedError() def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id, delete_info): """Deletes a snapshot of a volume attached to a specified instance. The counter action to this is :func:`volume_snapshot_create` :param nova.context.RequestContext context: The security context. :param nova.objects.instance.Instance instance: The instance that has the volume attached. :param uuid volume_id: Attached volume associated with the snapshot :param uuid snapshot_id: The snapshot to delete. :param dict delete_info: Volume backend technology specific data needed to be able to complete the snapshot. For example, in the case of qcow2 backed snapshots, this would include the file being merged, and the file being merged into (if appropriate). :return: None """ raise NotImplementedError() def default_root_device_name(self, instance, image_meta, root_bdm): """Provide a default root device name for the driver. :param nova.objects.instance.Instance instance: The instance to get the root device for. 
:param nova.objects.ImageMeta image_meta: The metadata of the image of the instance. :param nova.objects.BlockDeviceMapping root_bdm: The description of the root device. """ raise NotImplementedError() def default_device_names_for_instance(self, instance, root_device_name, *block_device_lists): """Default the missing device names in the block device mapping.""" raise NotImplementedError() def get_device_name_for_instance(self, instance, bdms, block_device_obj): """Get the next device name based on the block device mapping. :param instance: nova.objects.instance.Instance that volume is requesting a device name :param bdms: a nova.objects.BlockDeviceMappingList for the instance :param block_device_obj: A nova.objects.BlockDeviceMapping instance with all info about the requested block device. device_name does not need to be set, and should be decided by the driver implementation if not set. :returns: The chosen device name. """ raise NotImplementedError() def is_supported_fs_format(self, fs_type): """Check whether the file format is supported by this driver :param fs_type: the file system type to be checked, the validate values are defined at disk API module. """ # NOTE(jichenjc): Return False here so that every hypervisor # need to define their supported file system # type and implement this function at their # virt layer. return False def quiesce(self, context, instance, image_meta): """Quiesce the specified instance to prepare for snapshots. If the specified instance doesn't support quiescing, InstanceQuiesceNotSupported is raised. When it fails to quiesce by other errors (e.g. agent timeout), NovaException is raised. :param context: request context :param instance: nova.objects.instance.Instance to be quiesced :param nova.objects.ImageMeta image_meta: The metadata of the image of the instance. """ raise NotImplementedError() def unquiesce(self, context, instance, image_meta): """Unquiesce the specified instance after snapshots. If the specified instance doesn't support quiescing, InstanceQuiesceNotSupported is raised. When it fails to quiesce by other errors (e.g. agent timeout), NovaException is raised. :param context: request context :param instance: nova.objects.instance.Instance to be unquiesced :param nova.objects.ImageMeta image_meta: The metadata of the image of the instance. """ raise NotImplementedError() def network_binding_host_id(self, context, instance): """Get host ID to associate with network ports. :param context: request context :param instance: nova.objects.instance.Instance that the network ports will be associated with :returns: a string representing the host ID """ return instance.get('host') def load_compute_driver(virtapi, compute_driver=None): """Load a compute driver module. Load the compute driver module specified by the compute_driver configuration option or, if supplied, the driver name supplied as an argument. Compute drivers constructors take a VirtAPI object as their first object and this must be supplied. 
:param virtapi: a VirtAPI instance :param compute_driver: a compute driver name to override the config opt :returns: a ComputeDriver instance """ if not compute_driver: compute_driver = CONF.compute_driver if not compute_driver: LOG.error(_LE("Compute driver option required, but not specified")) sys.exit(1) LOG.info(_LI("Loading compute driver '%s'"), compute_driver) try: driver = importutils.import_object( 'nova.virt.%s' % compute_driver, virtapi) return utils.check_isinstance(driver, ComputeDriver) except ImportError: LOG.exception(_LE("Unable to load the virtualization driver")) sys.exit(1) def is_xenapi(): return CONF.compute_driver == 'xenapi.XenAPIDriver'
2.09375
2
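The ComputeDriver interface in the record above leaves most operations as NotImplementedError, but a few helpers (get_num_instances, instance_exists, node_is_available) have working defaults built on list_instances / list_instance_uuids. Below is a minimal sketch, assuming the record is importable as nova.virt.driver, of a hypothetical subclass that relies on those defaults; the FakeDriver name and the in-memory _instances dict are illustrative assumptions, not part of the record.

from nova.virt import driver


class FakeDriver(driver.ComputeDriver):
    """Hypothetical driver that only tracks instances in memory (sketch)."""

    def __init__(self, virtapi):
        super(FakeDriver, self).__init__(virtapi)
        self._instances = {}  # instance uuid -> instance name

    def init_host(self, host):
        pass  # nothing to catch up on in this sketch

    def list_instances(self):
        return list(self._instances.values())

    def list_instance_uuids(self):
        return list(self._instances.keys())


# The inherited defaults now work without further overrides:
fake = FakeDriver(virtapi=None)
fake._instances['uuid-1'] = 'instance-00000001'
print(fake.get_num_instances())  # -> 1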
otp/chat/ChatInputNormal.py
P1ayerOne/src
0
162
from direct.showbase import DirectObject from otp.otpbase import OTPGlobals import sys from direct.gui.DirectGui import * from pandac.PandaModules import * from otp.otpbase import OTPLocalizer class ChatInputNormal(DirectObject.DirectObject): ExecNamespace = None def __init__(self, chatMgr): self.chatMgr = chatMgr self.normalPos = Vec3(-1.083, 0, 0.804) self.whisperPos = Vec3(0.0, 0, 0.71) self.whisperAvatarName = None self.whisperAvatarId = None self.toPlayer = 0 wantHistory = 0 if __dev__: wantHistory = 1 self.wantHistory = base.config.GetBool('want-chat-history', wantHistory) self.history = [''] self.historySize = base.config.GetInt('chat-history-size', 10) self.historyIndex = 0 return def typeCallback(self, extraArgs): messenger.send('enterNormalChat') def delete(self): self.ignore('arrow_up-up') self.ignore('arrow_down-up') self.chatFrame.destroy() del self.chatFrame del self.chatButton del self.cancelButton del self.chatEntry del self.whisperLabel del self.chatMgr def activateByData(self, whisperAvatarId = None, toPlayer = 0): self.toPlayer = toPlayer self.whisperAvatarId = whisperAvatarId self.whisperAvatarName = base.talkAssistant.findName(self.whisperAvatarId, self.toPlayer) if self.whisperAvatarId: self.chatFrame.setPos(self.whisperPos) self.whisperLabel['text'] = OTPLocalizer.ChatInputWhisperLabel % self.whisperAvatarName self.whisperLabel.show() else: self.chatFrame.setPos(self.normalPos) self.whisperLabel.hide() self.chatEntry['focus'] = 1 self.chatFrame.show() if self.wantHistory: self.accept('arrow_up-up', self.getPrevHistory) self.accept('arrow_down-up', self.getNextHistory) def deactivate(self): self.chatEntry.set('') self.chatEntry['focus'] = 0 self.chatFrame.hide() self.whisperLabel.hide() base.win.closeIme() self.ignore('arrow_up-up') self.ignore('arrow_down-up') def checkForOverRide(self): return False def sendChat(self, text): if self.checkForOverRide(): self.chatEntry.enterText('') return self.deactivate() self.chatMgr.fsm.request('mainMenu') if text: if self.toPlayer: if self.whisperAvatarId: self.whisperAvatarName = None self.whisperAvatarId = None self.toPlayer = 0 elif self.whisperAvatarId: self.chatMgr.sendWhisperString(text, self.whisperAvatarId) self.whisperAvatarName = None self.whisperAvatarId = None else: if self.chatMgr.execChat: if text[0] == '>': text = self.__execMessage(text[1:]) base.localAvatar.setChatAbsolute(text, CFSpeech | CFTimeout) return base.talkAssistant.sendOpenTalk(text) if self.wantHistory: self.addToHistory(text) return def chatOverflow(self, overflowText): self.sendChat(self.chatEntry.get()) def __execMessage(self, message): if not ChatInputNormal.ExecNamespace: ChatInputNormal.ExecNamespace = {} exec('from pandac.PandaModules import *', globals(), self.ExecNamespace) self.importExecNamespace() try: if not isClient(): print('EXECWARNING ChatInputNormal eval: %s' % message) printStack() return str(eval(message, globals(), ChatInputNormal.ExecNamespace)) except SyntaxError: try: if not isClient(): print('EXECWARNING ChatInputNormal exec: %s' % message) printStack() exec(message, globals(), ChatInputNormal.ExecNamespace) return 'ok' except: exception = sys.exc_info()[0] extraInfo = sys.exc_info()[1] if extraInfo: return str(extraInfo) else: return str(exception) except: exception = sys.exc_info()[0] extraInfo = sys.exc_info()[1] if extraInfo: return str(extraInfo) else: return str(exception) def cancelButtonPressed(self): self.chatEntry.set('') self.chatMgr.fsm.request('mainMenu') def chatButtonPressed(self): 
self.sendChat(self.chatEntry.get()) def importExecNamespace(self): pass def addToHistory(self, text): self.history = [text] + self.history[:self.historySize - 1] self.historyIndex = 0 def getPrevHistory(self): self.chatEntry.set(self.history[self.historyIndex]) self.historyIndex += 1 self.historyIndex %= len(self.history) def getNextHistory(self): self.chatEntry.set(self.history[self.historyIndex]) self.historyIndex -= 1 self.historyIndex %= len(self.history) def setPos(self, posX, posY = None, posZ = None): if posX and posY and posZ: self.chatFrame.setPos(posX, posY, posZ) else: self.chatFrame.setPos(posX)
1.984375
2
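The wantHistory branch of ChatInputNormal above keeps a small ring of recent messages (addToHistory / getPrevHistory / getNextHistory). The sketch below replays that logic with a plain class so it can be run without Panda3D; the ChatHistory name and the add/prev/next method names are assumptions made for illustration.

class ChatHistory:
    def __init__(self, size=10):
        self.history = ['']
        self.historySize = size
        self.historyIndex = 0

    def add(self, text):
        # Newest entry first; keep at most historySize entries, as in addToHistory.
        self.history = [text] + self.history[:self.historySize - 1]
        self.historyIndex = 0

    def prev(self):
        # Mirror of getPrevHistory: return the current slot, then step back cyclically.
        text = self.history[self.historyIndex]
        self.historyIndex = (self.historyIndex + 1) % len(self.history)
        return text

    def next(self):
        # Mirror of getNextHistory: return the current slot, then step forward cyclically.
        text = self.history[self.historyIndex]
        self.historyIndex = (self.historyIndex - 1) % len(self.history)
        return text


h = ChatHistory(size=3)
h.add('hello')
h.add('world')
print(h.prev(), h.prev(), h.prev())  # cycles through 'world', 'hello', ''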
train_text_summarizer.py
stevaras2/bert
1
163
import argparse import json import numpy as np import pandas as pd import os from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report,f1_score from keras.models import Sequential from keras.layers import Dense, Dropout from keras import backend as K from keras.utils.vis_utils import plot_model from sklearn.externals import joblib import time def f1(y_true, y_pred): def recall(y_true, y_pred): """Recall metric. Only computes a batch-wise average of recall. Computes the recall, a metric for multi-label classification of how many relevant items are selected. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + K.epsilon()) return recall def precision(y_true, y_pred): """Precision metric. Only computes a batch-wise average of precision. Computes the precision, a metric for multi-label classification of how many selected items are relevant. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) recall = recall(y_true, y_pred) return 2*((precision*recall)/(precision+recall+K.epsilon())) def get_embeddings(sentences_list,layer_json): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :return: Dictionary with key each sentence of the sentences_list and as value the embedding ''' sentences = dict()#dict with key the index of each line of the sentences_list.txt and as value the sentence embeddings = dict()##dict with key the index of each sentence and as value the its embedding sentence_emb = dict()#key:sentence,value:its embedding with open(sentences_list,'r') as file: for index,line in enumerate(file): sentences[index] = line.strip() with open(layer_json, 'r',encoding='utf-8') as f: for line in f: embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features']) for key,value in sentences.items(): sentence_emb[value] = embeddings[key] return sentence_emb def train_classifier(sentences_list,layer_json,dataset_csv,filename): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :param filename: The path of the pickle file that the model will be stored :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb) 
print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) length = np.asarray(length) print(length.shape) label = np.asarray(label) print(errors) features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1) features = np.column_stack([features, length]) # np.append(features,length,axis=1) print(features.shape) X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42) log = LogisticRegression(random_state=0, solver='newton-cg', max_iter=1000, C=0.1) log.fit(X_train, y_train) #save the model _ = joblib.dump(log, filename, compress=9) predictions = log.predict(X_val) print("###########################################") print("Results using embeddings from the",layer_json,"file") print(classification_report(y_val, predictions)) print("F1 score using Logistic Regression:",f1_score(y_val, predictions)) print("###########################################") #train a DNN f1_results = list() for i in range(3): model = Sequential() model.add(Dense(64, activation='relu', trainable=True)) model.add(Dense(128, activation='relu', trainable=True)) model.add(Dropout(0.30)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.25)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.35)) model.add(Dense(1, activation='sigmoid')) # compile network model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1]) # fit network model.fit(X_train, y_train, epochs=100, batch_size=64) loss, f_1 = model.evaluate(X_val, y_val, verbose=1) print('\nTest F1: %f' % (f_1 * 100)) f1_results.append(f_1) model = None print("###########################################") print("Results using embeddings from the", layer_json, "file") # evaluate print(np.mean(f1_results)) print("###########################################") def parameter_tuning_LR(sentences_list,layer_json,dataset_csv): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb) print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) length = np.asarray(length) print(length.shape) label = np.asarray(label) print(errors) features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1) features = 
np.column_stack([features, length]) print(features.shape) X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42) C = [0.1,1,2,5,10] solver = ['newton-cg','saga','sag'] best_params = dict() best_score = 0.0 for c in C: for s in solver: start = time.time() log = LogisticRegression(random_state=0, solver=s, max_iter=1000, C=c) log.fit(X_train, y_train) predictions = log.predict(X_val) print("###########################################") print("LR with C =",c,'and solver = ',s) print("Results using embeddings from the", layer_json, "file") print(classification_report(y_val, predictions)) f1 = f1_score(y_val, predictions) if f1 > best_score: best_score = f1 best_params['c'] = c best_params['solver'] = s print("F1 score using Logistic Regression:",f1) print("###########################################") end = time.time() running_time = end - start print("Running time:"+str(running_time)) def visualize_DNN(file_to_save): ''' Save the DNN architecture to a png file. Better use the Visulize_DNN.ipynd :param file_to_save: the png file that the architecture of the DNN will be saved. :return: None ''' model = Sequential() model.add(Dense(64, activation='relu', trainable=True)) model.add(Dense(128, activation='relu', trainable=True)) model.add(Dropout(0.30)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.25)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.35)) model.add(Dense(1, activation='sigmoid')) plot_model(model, to_file=file_to_save, show_shapes=True) def save_model(sentences_list,layer_json,dataset_csv,pkl): dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list, layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb) print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) length = np.asarray(length) print(length.shape) label = np.asarray(label) print(errors) features = np.concatenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1) features = np.column_stack([features, length]) print(features.shape) log = LogisticRegression(random_state=0, solver='saga', max_iter=1000, C=1) log.fit(features, label) _ = joblib.dump(log, pkl, compress=9) if __name__ == '__main__': #save_model('sentences_list.txt','Fudan_output_layer_-1.json','train_sentences1.csv','summarizer1.pkl') ap = argparse.ArgumentParser() ap.add_argument("-s", "--sentences", required=True, help="sentences list") ap.add_argument("-o", "--output", required=True, help="output") ap.add_argument("-ts", "--train set", required=True, help="path to train set") ap.add_argument("-sp", "--summarizer path", 
required=True, help="path to save summarizer") args = vars(ap.parse_args()) layer = train_classifier(args['sentences'], args['output'], args['train set'],args['summarizer path']) #layer_1 = train_classifier('sentences_list.txt', 'new_output_layer_-1.json', 'train_sentences1.csv','fine_tune_BERT_sentence_classification1.pkl') #layer_2 = train_classifier('sentences_list.txt','new_output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification2.pkl') #layer_3 = train_classifier('sentences_list.txt','new_output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification3.pkl') #layer_4 = train_classifier('sentences_list.txt','new_output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification4.pkl') #tuning = parameter_tuning_LR('sentences_list.txt','new_output_layer_-1.json','train_sentences1.csv') #layer_1 = train_classifier('sentences_list.txt','output_layer_-1.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl') #layer_2 = train_classifier('sentences_list.txt','output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl') #layer_3 = train_classifier('sentences_list.txt','output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl') #layer_4 = train_classifier('sentences_list.txt','output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
2.59375
3
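A minimal usage sketch for the summarizer pickle that save_model() in the record above writes out; the file name 'summarizer1.pkl' and the 3073-dimensional feature layout (four 768-dim BERT vectors plus a length scalar) are read off that script and should be treated as assumptions, not facts about the dataset.

import numpy as np
import joblib

# Load the LogisticRegression fitted by save_model(); the path is a placeholder.
clf = joblib.load('summarizer1.pkl')

# Build one feature row the same way the script does: sentence, previous,
# next and section embeddings (768 dims each) plus the sentence length.
sentence = previous = nexts = section = np.zeros(768)
length = 12
features = np.concatenate([sentence, previous, nexts, section, [length]]).reshape(1, -1)
print(clf.predict(features))   # 0/1 label, matching the training CSV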
src/test/python/apache/aurora/executor/test_status_manager.py
zmanji/incubator-aurora
0
164
<filename>src/test/python/apache/aurora/executor/test_status_manager.py<gh_stars>0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import time
from unittest import TestCase

import mock
from mesos.interface.mesos_pb2 import TaskState

from apache.aurora.executor.common.status_checker import StatusChecker
from apache.aurora.executor.status_manager import StatusManager


class FakeStatusChecker(StatusChecker):
  def __init__(self):
    self.call_count = 0

  @property
  def status(self):
    if self.call_count == 2:
      return TaskState.Value('TASK_KILLED')
    self.call_count += 1
    return None


class TestStatusManager(TestCase):
  def setUp(self):
    self.callback_called = False

  def test_run(self):
    checker = FakeStatusChecker()
    def callback(result):
      assert result == TaskState.Value('TASK_KILLED')
      self.callback_called = True
    mock_time = mock.Mock(spec=time)
    status_manager = StatusManager(checker, callback, mock_time)
    status_manager.run()
    assert mock_time.sleep.call_count == 2
    assert self.callback_called is True
2.28125
2
Supplemental/A5. Collision estimation module/Con_est.py
wangqf1997/Human-injury-based-safety-decision-of-automated-vehicles
0
165
''' ------------------------------------------------------------------------------------------------- This code accompanies the paper titled "Human injury-based safety decision of automated vehicles" Author: <NAME>, <NAME>, <NAME>, <NAME> Corresponding author: <NAME> (<EMAIL>) ------------------------------------------------------------------------------------------------- ''' import torch import numpy as np from torch import nn from torch.nn.utils import weight_norm __author__ = "<NAME>" def Collision_cond(veh_striking_list, V1_v, V2_v, delta_angle, veh_param): ''' Estimate the collision condition. ''' (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param delta_angle_2 = np.arccos(np.abs(np.cos(delta_angle))) if -1e-6 < delta_angle_2 < 1e-6: delta_angle_2 = 1e-6 delta_v1_list = [] delta_v2_list = [] # Estimate the collision condition (delat-v) according to the principal impact direction. for veh_striking in veh_striking_list: if veh_striking[0] == 1: veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0]) veh_a2 = np.abs(veh_cgs[1] - veh_striking[3]) veh_RDS = np.abs(V1_v * np.cos(delta_angle) - V2_v) veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(veh_ca + delta_angle_2)) if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]: veh_e = 2 / veh_RDS else: veh_e = 0.5 / veh_RDS elif veh_striking[0] == 2: veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0]) veh_a2 = np.abs(veh_cgf[1] - veh_striking[3]) veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2)) veh_RDS = V1_v * np.sin(delta_angle_2) veh_e = 1.5 / veh_RDS elif veh_striking[0] == 3: veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1]) veh_a1 = np.abs(veh_cgs[0] - veh_striking[3]) veh_RDS = np.abs(V2_v * np.cos(delta_angle) - V1_v) veh_a2 = np.abs(np.sqrt(veh_cgf[1] ** 2 + veh_cgs[1] ** 2) * np.cos(veh_ca + delta_angle_2)) if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]: veh_e = 2 / veh_RDS else: veh_e = 0.5 / veh_RDS elif veh_striking[0] == 4: veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1]) veh_a1 = np.abs(veh_cgf[0] - veh_striking[3]) veh_a2 = np.abs(np.sqrt(veh_cgf[1] ** 2 + veh_cgs[1] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2)) veh_RDS = V2_v * np.sin(delta_angle_2) veh_e = 1.5 / veh_RDS # Obtain delta-v based on the plane 2-DOF rigid-body collision model with momentum conservation. veh_y1 = veh_k[0] ** 2 / (veh_a1 ** 2 + veh_k[0] ** 2) veh_y2 = veh_k[1] ** 2 / (veh_a2 ** 2 + veh_k[1] ** 2) delta_v1 = (1 + veh_e) * veh_m[1] * veh_y1 * veh_y2 * veh_RDS / (veh_m[0] * veh_y1 + veh_m[1] * veh_y2) delta_v2 = (1 + veh_e) * veh_m[0] * veh_y1 * veh_y2 * veh_RDS / (veh_m[0] * veh_y1 + veh_m[1] * veh_y2) delta_v1_list.append(delta_v1) delta_v2_list.append(delta_v2) delta_v1_ = max(delta_v1_list) delta_v2_ = max(delta_v2_list) index = delta_v1_list.index(max(delta_v1_list)) return delta_v1_, delta_v2_, index
2.578125
3
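The delta-v values in Collision_cond() above come from a plane 2-DOF rigid-body model with momentum conservation; the snippet below isolates that final step. All numeric values are made-up illustration inputs, not figures from the paper.

# Effective-mass reduction factors from radius of gyration k and moment arm a.
m1, m2 = 1500.0, 1800.0   # vehicle masses [kg] (illustrative)
k1, k2 = 1.2, 1.3         # radii of gyration [m] (illustrative)
a1, a2 = 0.8, 0.5         # moment arms of the impact point [m] (illustrative)
e = 0.1                   # restitution coefficient (illustrative)
RDS = 10.0                # relative closing speed [m/s] (illustrative)

y1 = k1 ** 2 / (a1 ** 2 + k1 ** 2)
y2 = k2 ** 2 / (a2 ** 2 + k2 ** 2)
delta_v1 = (1 + e) * m2 * y1 * y2 * RDS / (m1 * y1 + m2 * y2)
delta_v2 = (1 + e) * m1 * y1 * y2 * RDS / (m1 * y1 + m2 * y2)
print(delta_v1, delta_v2)  # speed changes of the two vehicles [m/s]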
train/train.py
TontonTremblay/pixel-nerf
0
166
<filename>train/train.py # Training to a set of multiple objects (e.g. ShapeNet or DTU) # tensorboard logs available in logs/<expname> import sys import os sys.path.insert( 0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src")) ) import warnings import trainlib from model import make_model, loss from render import NeRFRenderer from data import get_split_dataset import util import numpy as np import torch.nn.functional as F import torch from dotmap import DotMap def extra_args(parser): parser.add_argument( "--batch_size", "-B", type=int, default=4, help="Object batch size ('SB')" ) parser.add_argument( "--nviews", "-V", type=str, default="1", help="Number of source views (multiview); put multiple (space delim) to pick randomly per batch ('NV')", ) parser.add_argument( "--freeze_enc", action="store_true", default=None, help="Freeze encoder weights and only train MLP", ) parser.add_argument( "--no_bbox_step", type=int, default=100000, help="Step to stop using bbox sampling", ) parser.add_argument( "--fixed_test", action="store_true", default=None, help="Freeze encoder weights and only train MLP", ) return parser args, conf = util.args.parse_args(extra_args, training=True, default_ray_batch_size=128) device = util.get_cuda(args.gpu_id[0]) dset, val_dset, _ = get_split_dataset(args.dataset_format, args.datadir) print( "dset z_near {}, z_far {}, lindisp {}".format(dset.z_near, dset.z_far, dset.lindisp) ) net = make_model(conf["model"]).to(device=device) net.stop_encoder_grad = args.freeze_enc if args.freeze_enc: print("Encoder frozen") net.encoder.eval() renderer = NeRFRenderer.from_conf(conf["renderer"], lindisp=dset.lindisp,).to( device=device ) # Parallize render_par = renderer.bind_parallel(net, args.gpu_id).eval() nviews = list(map(int, args.nviews.split())) class PixelNeRFTrainer(trainlib.Trainer): def __init__(self): super().__init__(net, dset, val_dset, args, conf["train"], device=device) self.renderer_state_path = "%s/%s/_renderer" % ( self.args.checkpoints_path, self.args.name, ) self.lambda_coarse = conf.get_float("loss.lambda_coarse") self.lambda_fine = conf.get_float("loss.lambda_fine", 1.0) print( "lambda coarse {} and fine {}".format(self.lambda_coarse, self.lambda_fine) ) self.rgb_coarse_crit = loss.get_rgb_loss(conf["loss.rgb"], True) fine_loss_conf = conf["loss.rgb"] if "rgb_fine" in conf["loss"]: print("using fine loss") fine_loss_conf = conf["loss.rgb_fine"] self.rgb_fine_crit = loss.get_rgb_loss(fine_loss_conf, False) if args.resume: if os.path.exists(self.renderer_state_path): renderer.load_state_dict( torch.load(self.renderer_state_path, map_location=device) ) self.z_near = dset.z_near self.z_far = dset.z_far self.use_bbox = args.no_bbox_step > 0 def post_batch(self, epoch, batch): renderer.sched_step(args.batch_size) def extra_save_state(self): torch.save(renderer.state_dict(), self.renderer_state_path) def calc_losses(self, data, is_train=True, global_step=0): if "images" not in data: return {} all_images = data["images"].to(device=device) # (SB, NV, 3, H, W) SB, NV, _, H, W = all_images.shape all_poses = data["poses"].to(device=device) # (SB, NV, 4, 4) all_bboxes = data.get("bbox") # (SB, NV, 4) cmin rmin cmax rmax all_focals = data["focal"] # (SB) all_c = data.get("c") # (SB) if self.use_bbox and global_step >= args.no_bbox_step: self.use_bbox = False print(">>> Stopped using bbox sampling @ iter", global_step) if not is_train or not self.use_bbox: all_bboxes = None all_rgb_gt = [] all_rays = [] curr_nviews = nviews[torch.randint(0, len(nviews), 
()).item()] if curr_nviews == 1: image_ord = torch.randint(0, NV, (SB, 1)) else: image_ord = torch.empty((SB, curr_nviews), dtype=torch.long) for obj_idx in range(SB): if all_bboxes is not None: bboxes = all_bboxes[obj_idx] images = all_images[obj_idx] # (NV, 3, H, W) poses = all_poses[obj_idx] # (NV, 4, 4) focal = all_focals[obj_idx] c = None if "c" in data: c = data["c"][obj_idx] if curr_nviews > 1: # Somewhat inefficient, don't know better way image_ord[obj_idx] = torch.from_numpy( np.random.choice(NV, curr_nviews, replace=False) ) images_0to1 = images * 0.5 + 0.5 cam_rays = util.gen_rays( poses, W, H, focal, self.z_near, self.z_far, c=c ) # (NV, H, W, 8) rgb_gt_all = images_0to1 rgb_gt_all = ( rgb_gt_all.permute(0, 2, 3, 1).contiguous().reshape(-1, 3) ) # (NV, H, W, 3) if all_bboxes is not None: pix = util.bbox_sample(bboxes, args.ray_batch_size) pix_inds = pix[..., 0] * H * W + pix[..., 1] * W + pix[..., 2] else: pix_inds = torch.randint(0, NV * H * W, (args.ray_batch_size,)) rgb_gt = rgb_gt_all[pix_inds] # (ray_batch_size, 3) rays = cam_rays.view(-1, cam_rays.shape[-1])[pix_inds].to( device=device ) # (ray_batch_size, 8) all_rgb_gt.append(rgb_gt) all_rays.append(rays) all_rgb_gt = torch.stack(all_rgb_gt) # (SB, ray_batch_size, 3) all_rays = torch.stack(all_rays) # (SB, ray_batch_size, 8) image_ord = image_ord.to(device) src_images = util.batched_index_select_nd( all_images, image_ord ) # (SB, NS, 3, H, W) src_poses = util.batched_index_select_nd(all_poses, image_ord) # (SB, NS, 4, 4) all_bboxes = all_poses = all_images = None net.encode( src_images, src_poses, all_focals.to(device=device), c=all_c.to(device=device) if all_c is not None else None, ) render_dict = DotMap(render_par(all_rays, want_weights=True,)) coarse = render_dict.coarse fine = render_dict.fine using_fine = len(fine) > 0 loss_dict = {} rgb_loss = self.rgb_coarse_crit(coarse.rgb, all_rgb_gt) if rgb_loss.isnan().any().item()==True: raise() loss_dict["rc"] = rgb_loss.item() * self.lambda_coarse if using_fine: fine_loss = self.rgb_fine_crit(fine.rgb, all_rgb_gt) rgb_loss = rgb_loss * self.lambda_coarse + fine_loss * self.lambda_fine loss_dict["rf"] = fine_loss.item() * self.lambda_fine loss = rgb_loss if is_train: loss.backward() loss_dict["t"] = loss.item() return loss_dict def train_step(self, data, global_step): return self.calc_losses(data, is_train=True, global_step=global_step) def eval_step(self, data, global_step): renderer.eval() losses = self.calc_losses(data, is_train=False, global_step=global_step) renderer.train() return losses def vis_step(self, data, global_step, idx=None): if "images" not in data: return {} if idx is None: batch_idx = np.random.randint(0, data["images"].shape[0]) else: print(idx) batch_idx = idx images = data["images"][batch_idx].to(device=device) # (NV, 3, H, W) poses = data["poses"][batch_idx].to(device=device) # (NV, 4, 4) focal = data["focal"][batch_idx : batch_idx + 1] # (1) c = data.get("c") if c is not None: c = c[batch_idx : batch_idx + 1] # (1) NV, _, H, W = images.shape cam_rays = util.gen_rays( poses, W, H, focal, self.z_near, self.z_far, c=c ) # (NV, H, W, 8) images_0to1 = images * 0.5 + 0.5 # (NV, 3, H, W) curr_nviews = nviews[torch.randint(0, len(nviews), (1,)).item()] views_src = np.sort(np.random.choice(NV, curr_nviews, replace=False)) view_dest = np.random.randint(0, NV - curr_nviews) for vs in range(curr_nviews): view_dest += view_dest >= views_src[vs] views_src = torch.from_numpy(views_src) # set renderer net to eval mode renderer.eval() source_views = ( 
images_0to1[views_src] .permute(0, 2, 3, 1) .cpu() .numpy() .reshape(-1, H, W, 3) ) gt = images_0to1[view_dest].permute(1, 2, 0).cpu().numpy().reshape(H, W, 3) with torch.no_grad(): test_rays = cam_rays[view_dest] # (H, W, 8) test_images = images[views_src] # (NS, 3, H, W) net.encode( test_images.unsqueeze(0), poses[views_src].unsqueeze(0), focal.to(device=device), c=c.to(device=device) if c is not None else None, ) test_rays = test_rays.reshape(1, H * W, -1) render_dict = DotMap(render_par(test_rays, want_weights=True)) coarse = render_dict.coarse fine = render_dict.fine using_fine = len(fine) > 0 alpha_coarse_np = coarse.weights[0].sum(dim=-1).cpu().numpy().reshape(H, W) rgb_coarse_np = coarse.rgb[0].cpu().numpy().reshape(H, W, 3) depth_coarse_np = coarse.depth[0].cpu().numpy().reshape(H, W) if using_fine: alpha_fine_np = fine.weights[0].sum(dim=1).cpu().numpy().reshape(H, W) depth_fine_np = fine.depth[0].cpu().numpy().reshape(H, W) rgb_fine_np = fine.rgb[0].cpu().numpy().reshape(H, W, 3) print("c rgb min {} max {}".format(rgb_coarse_np.min(), rgb_coarse_np.max())) print( "c alpha min {}, max {}".format( alpha_coarse_np.min(), alpha_coarse_np.max() ) ) alpha_coarse_cmap = util.cmap(alpha_coarse_np) / 255 depth_coarse_cmap = util.cmap(depth_coarse_np) / 255 vis_list = [ *source_views, gt, depth_coarse_cmap, rgb_coarse_np, alpha_coarse_cmap, ] vis_coarse = np.hstack(vis_list) vis = vis_coarse if using_fine: print("f rgb min {} max {}".format(rgb_fine_np.min(), rgb_fine_np.max())) print( "f alpha min {}, max {}".format( alpha_fine_np.min(), alpha_fine_np.max() ) ) depth_fine_cmap = util.cmap(depth_fine_np) / 255 alpha_fine_cmap = util.cmap(alpha_fine_np) / 255 vis_list = [ *source_views, gt, depth_fine_cmap, rgb_fine_np, alpha_fine_cmap, ] vis_fine = np.hstack(vis_list) vis = np.vstack((vis_coarse, vis_fine)) rgb_psnr = rgb_fine_np else: rgb_psnr = rgb_coarse_np psnr = util.psnr(rgb_psnr, gt) vals = {"psnr": psnr} print("psnr", psnr) # set the renderer network back to train mode renderer.train() return vis, vals trainer = PixelNeRFTrainer() trainer.start()
2.09375
2
napari/_qt/dialogs/qt_plugin_dialog.py
kne42/napari
0
167
<filename>napari/_qt/dialogs/qt_plugin_dialog.py import os import sys from pathlib import Path from typing import Sequence from napari_plugin_engine.dist import standard_metadata from napari_plugin_engine.exceptions import PluginError from qtpy.QtCore import QEvent, QProcess, QProcessEnvironment, QSize, Qt, Slot from qtpy.QtGui import QFont, QMovie from qtpy.QtWidgets import ( QCheckBox, QDialog, QFrame, QHBoxLayout, QLabel, QLineEdit, QListWidget, QListWidgetItem, QPushButton, QSizePolicy, QSplitter, QTextEdit, QVBoxLayout, QWidget, ) import napari.resources from ...plugins import plugin_manager from ...plugins.pypi import ( ProjectInfo, iter_napari_plugin_info, normalized_name, ) from ...utils._appdirs import user_plugin_dir, user_site_packages from ...utils.misc import parse_version, running_as_bundled_app from ...utils.translations import trans from ..qthreading import create_worker from ..widgets.qt_eliding_label import ElidingLabel from ..widgets.qt_plugin_sorter import QtPluginSorter from .qt_plugin_report import QtPluginErrReporter # TODO: add error icon and handle pip install errors # TODO: add queue to handle clicks when already processing class Installer: def __init__(self, output_widget: QTextEdit = None): from ...plugins import plugin_manager # create install process self._output_widget = None self.process = QProcess() self.process.setProgram(sys.executable) self.process.setProcessChannelMode(QProcess.MergedChannels) self.process.readyReadStandardOutput.connect(self._on_stdout_ready) # setup process path env = QProcessEnvironment() combined_paths = os.pathsep.join( [user_site_packages(), env.systemEnvironment().value("PYTHONPATH")] ) env.insert("PYTHONPATH", combined_paths) # use path of parent process env.insert( "PATH", QProcessEnvironment.systemEnvironment().value("PATH") ) self.process.setProcessEnvironment(env) self.process.finished.connect(lambda: plugin_manager.discover()) self.process.finished.connect(lambda: plugin_manager.prune()) self.set_output_widget(output_widget) def set_output_widget(self, output_widget: QTextEdit): if output_widget: self._output_widget = output_widget self.process.setParent(output_widget) def _on_stdout_ready(self): if self._output_widget: text = self.process.readAllStandardOutput().data().decode() self._output_widget.append(text) def install(self, pkg_list: Sequence[str]): cmd = ['-m', 'pip', 'install', '--upgrade'] if running_as_bundled_app() and sys.platform.startswith('linux'): cmd += [ '--no-warn-script-location', '--prefix', user_plugin_dir(), ] self.process.setArguments(cmd + list(pkg_list)) if self._output_widget: self._output_widget.clear() self.process.start() def uninstall(self, pkg_list: Sequence[str]): args = ['-m', 'pip', 'uninstall', '-y'] self.process.setArguments(args + list(pkg_list)) if self._output_widget: self._output_widget.clear() self.process.start() for pkg in pkg_list: plugin_manager.unregister(pkg) class PluginListItem(QFrame): def __init__( self, package_name: str, version: str = '', url: str = '', summary: str = '', author: str = '', license: str = "UNKNOWN", *, plugin_name: str = None, parent: QWidget = None, enabled: bool = True, ): super().__init__(parent) self.setup_ui(enabled) if plugin_name: self.plugin_name.setText(plugin_name) self.package_name.setText(f"{package_name} {version}") self.summary.setText(summary) self.package_author.setText(author) self.action_button.setText(trans._("uninstall")) self.action_button.setObjectName("remove_button") self.enabled_checkbox.setChecked(enabled) if 
PluginError.get(plugin_name=plugin_name): def _show_error(): rep = QtPluginErrReporter( parent=self._get_dialog(), initial_plugin=plugin_name ) rep.setWindowFlags(Qt.Sheet) close = QPushButton(trans._("close"), rep) rep.layout.addWidget(close) rep.plugin_combo.hide() close.clicked.connect(rep.close) rep.open() self.error_indicator.clicked.connect(_show_error) self.error_indicator.show() self.summary.setIndent(18) else: self.summary.setIndent(38) else: self.plugin_name.setText(package_name) self.package_name.setText(version) self.summary.setText(summary) self.package_author.setText(author) self.action_button.setText(trans._("install")) self.enabled_checkbox.hide() def _get_dialog(self) -> QDialog: p = self.parent() while not isinstance(p, QDialog) and p.parent(): p = p.parent() return p def setup_ui(self, enabled=True): self.v_lay = QVBoxLayout(self) self.v_lay.setContentsMargins(-1, 6, -1, 6) self.v_lay.setSpacing(0) self.row1 = QHBoxLayout() self.row1.setSpacing(6) self.enabled_checkbox = QCheckBox(self) self.enabled_checkbox.setChecked(enabled) self.enabled_checkbox.stateChanged.connect(self._on_enabled_checkbox) self.enabled_checkbox.setToolTip(trans._("enable/disable")) sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.enabled_checkbox.sizePolicy().hasHeightForWidth() ) self.enabled_checkbox.setSizePolicy(sizePolicy) self.enabled_checkbox.setMinimumSize(QSize(20, 0)) self.enabled_checkbox.setText("") self.row1.addWidget(self.enabled_checkbox) self.plugin_name = QLabel(self) sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.plugin_name.sizePolicy().hasHeightForWidth() ) self.plugin_name.setSizePolicy(sizePolicy) font15 = QFont() font15.setPointSize(15) self.plugin_name.setFont(font15) self.row1.addWidget(self.plugin_name) self.package_name = QLabel(self) self.package_name.setAlignment( Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter ) self.row1.addWidget(self.package_name) self.action_button = QPushButton(self) sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.action_button.sizePolicy().hasHeightForWidth() ) self.action_button.setSizePolicy(sizePolicy) self.row1.addWidget(self.action_button) self.v_lay.addLayout(self.row1) self.row2 = QHBoxLayout() self.error_indicator = QPushButton() self.error_indicator.setObjectName("warning_icon") self.error_indicator.setCursor(Qt.PointingHandCursor) self.error_indicator.hide() self.row2.addWidget(self.error_indicator) self.row2.setContentsMargins(-1, 4, 0, -1) self.summary = ElidingLabel(parent=self) sizePolicy = QSizePolicy( QSizePolicy.MinimumExpanding, QSizePolicy.Preferred ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.summary.sizePolicy().hasHeightForWidth() ) self.summary.setSizePolicy(sizePolicy) self.summary.setObjectName("small_text") self.row2.addWidget(self.summary) self.package_author = QLabel(self) sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.package_author.sizePolicy().hasHeightForWidth() ) self.package_author.setSizePolicy(sizePolicy) self.package_author.setObjectName("small_text") 
self.row2.addWidget(self.package_author) self.v_lay.addLayout(self.row2) def _on_enabled_checkbox(self, state: int): """Called with `state` when checkbox is clicked.""" plugin_manager.set_blocked(self.plugin_name.text(), not state) class QPluginList(QListWidget): def __init__(self, parent: QWidget, installer: Installer): super().__init__(parent) self.installer = installer self.setSortingEnabled(True) @Slot(ProjectInfo) def addItem( self, project_info: ProjectInfo, plugin_name=None, enabled=True ): # don't add duplicates if ( self.findItems(project_info.name, Qt.MatchFixedString) and not plugin_name ): return # including summary here for sake of filtering below. searchable_text = project_info.name + " " + project_info.summary item = QListWidgetItem(searchable_text, parent=self) item.version = project_info.version super().addItem(item) widg = PluginListItem( *project_info, parent=self, plugin_name=plugin_name, enabled=enabled, ) method = getattr( self.installer, 'uninstall' if plugin_name else 'install' ) widg.action_button.clicked.connect(lambda: method([project_info.name])) item.setSizeHint(widg.sizeHint()) self.setItemWidget(item, widg) @Slot(ProjectInfo) def tag_outdated(self, project_info: ProjectInfo): for item in self.findItems(project_info.name, Qt.MatchFixedString): current = item.version latest = project_info.version if parse_version(current) >= parse_version(latest): continue if hasattr(item, 'outdated'): # already tagged it continue item.outdated = True widg = self.itemWidget(item) update_btn = QPushButton( trans._("update (v{latest})", latest=latest), widg ) update_btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) update_btn.clicked.connect( lambda: self.installer.install([item.text()]) ) widg.row1.insertWidget(3, update_btn) def filter(self, text: str): """Filter items to those containing `text`.""" shown = self.findItems(text, Qt.MatchContains) for i in range(self.count()): item = self.item(i) item.setHidden(item not in shown) class QtPluginDialog(QDialog): def __init__(self, parent=None): super().__init__(parent) self.installer = Installer() self.setup_ui() self.installer.set_output_widget(self.stdout_text) self.installer.process.started.connect(self._on_installer_start) self.installer.process.finished.connect(self._on_installer_done) self.refresh() def _on_installer_start(self): self.show_status_btn.setChecked(True) self.working_indicator.show() self.process_error_indicator.hide() def _on_installer_done(self, exit_code, exit_status): self.working_indicator.hide() if exit_code: self.process_error_indicator.show() else: self.show_status_btn.setChecked(False) self.refresh() self.plugin_sorter.refresh() def refresh(self): self.installed_list.clear() self.available_list.clear() # fetch installed from ...plugins import plugin_manager plugin_manager.discover() # since they might not be loaded yet already_installed = set() for plugin_name, mod_name, distname in plugin_manager.iter_available(): # not showing these in the plugin dialog if plugin_name in ('napari_plugin_engine',): continue if distname: already_installed.add(distname) meta = standard_metadata(distname) else: meta = {} self.installed_list.addItem( ProjectInfo( normalized_name(distname or ''), meta.get('version', ''), meta.get('url', ''), meta.get('summary', ''), meta.get('author', ''), meta.get('license', ''), ), plugin_name=plugin_name, enabled=plugin_name in plugin_manager.plugins, ) # self.v_splitter.setSizes([70 * self.installed_list.count(), 10, 10]) # fetch available plugins self.worker = 
create_worker(iter_napari_plugin_info) def _handle_yield(project_info): if project_info.name in already_installed: self.installed_list.tag_outdated(project_info) else: self.available_list.addItem(project_info) self.worker.yielded.connect(_handle_yield) self.worker.finished.connect(self.working_indicator.hide) self.worker.finished.connect(self._update_count_in_label) self.worker.start() def setup_ui(self): self.resize(1080, 640) vlay_1 = QVBoxLayout(self) self.h_splitter = QSplitter(self) vlay_1.addWidget(self.h_splitter) self.h_splitter.setOrientation(Qt.Horizontal) self.v_splitter = QSplitter(self.h_splitter) self.v_splitter.setOrientation(Qt.Vertical) self.v_splitter.setMinimumWidth(500) self.plugin_sorter = QtPluginSorter(parent=self.h_splitter) self.plugin_sorter.layout().setContentsMargins(2, 0, 0, 0) self.plugin_sorter.hide() installed = QWidget(self.v_splitter) lay = QVBoxLayout(installed) lay.setContentsMargins(0, 2, 0, 2) self.installed_label = QLabel(trans._("Installed Plugins")) self.installed_filter = QLineEdit() self.installed_filter.setPlaceholderText("search...") self.installed_filter.setMaximumWidth(350) self.installed_filter.setClearButtonEnabled(True) mid_layout = QHBoxLayout() mid_layout.addWidget(self.installed_label) mid_layout.addWidget(self.installed_filter) mid_layout.addStretch() lay.addLayout(mid_layout) self.installed_list = QPluginList(installed, self.installer) self.installed_filter.textChanged.connect(self.installed_list.filter) lay.addWidget(self.installed_list) uninstalled = QWidget(self.v_splitter) lay = QVBoxLayout(uninstalled) lay.setContentsMargins(0, 2, 0, 2) self.avail_label = QLabel(trans._("Available Plugins")) self.avail_filter = QLineEdit() self.avail_filter.setPlaceholderText("search...") self.avail_filter.setMaximumWidth(350) self.avail_filter.setClearButtonEnabled(True) mid_layout = QHBoxLayout() mid_layout.addWidget(self.avail_label) mid_layout.addWidget(self.avail_filter) mid_layout.addStretch() lay.addLayout(mid_layout) self.available_list = QPluginList(uninstalled, self.installer) self.avail_filter.textChanged.connect(self.available_list.filter) lay.addWidget(self.available_list) self.stdout_text = QTextEdit(self.v_splitter) self.stdout_text.setReadOnly(True) self.stdout_text.setObjectName("pip_install_status") self.stdout_text.hide() buttonBox = QHBoxLayout() self.working_indicator = QLabel(trans._("loading ..."), self) sp = self.working_indicator.sizePolicy() sp.setRetainSizeWhenHidden(True) self.working_indicator.setSizePolicy(sp) self.process_error_indicator = QLabel(self) self.process_error_indicator.setObjectName("error_label") self.process_error_indicator.hide() load_gif = str(Path(napari.resources.__file__).parent / "loading.gif") mov = QMovie(load_gif) mov.setScaledSize(QSize(18, 18)) self.working_indicator.setMovie(mov) mov.start() self.direct_entry_edit = QLineEdit(self) self.direct_entry_edit.installEventFilter(self) self.direct_entry_edit.setPlaceholderText( trans._('install by name/url, or drop file...') ) self.direct_entry_btn = QPushButton(trans._("Install"), self) self.direct_entry_btn.clicked.connect(self._install_packages) self.show_status_btn = QPushButton(trans._("Show Status"), self) self.show_status_btn.setFixedWidth(100) self.show_sorter_btn = QPushButton(trans._("<< Show Sorter"), self) self.close_btn = QPushButton(trans._("Close"), self) self.close_btn.clicked.connect(self.accept) buttonBox.addWidget(self.show_status_btn) buttonBox.addWidget(self.working_indicator) buttonBox.addWidget(self.direct_entry_edit) 
buttonBox.addWidget(self.direct_entry_btn) buttonBox.addWidget(self.process_error_indicator) buttonBox.addSpacing(60) buttonBox.addWidget(self.show_sorter_btn) buttonBox.addWidget(self.close_btn) buttonBox.setContentsMargins(0, 0, 4, 0) vlay_1.addLayout(buttonBox) self.show_status_btn.setCheckable(True) self.show_status_btn.setChecked(False) self.show_status_btn.toggled.connect(self._toggle_status) self.show_sorter_btn.setCheckable(True) self.show_sorter_btn.setChecked(False) self.show_sorter_btn.toggled.connect(self._toggle_sorter) self.v_splitter.setStretchFactor(1, 2) self.h_splitter.setStretchFactor(0, 2) self.avail_filter.setFocus() def _update_count_in_label(self): count = self.available_list.count() self.avail_label.setText( trans._("Available Plugins ({count})", count=count) ) def eventFilter(self, watched, event): if event.type() == QEvent.DragEnter: # we need to accept this event explicitly to be able # to receive QDropEvents! event.accept() if event.type() == QEvent.Drop: md = event.mimeData() if md.hasUrls(): files = [url.toLocalFile() for url in md.urls()] self.direct_entry_edit.setText(files[0]) return True return super().eventFilter(watched, event) def _toggle_sorter(self, show): if show: self.show_sorter_btn.setText(trans._(">> Hide Sorter")) self.plugin_sorter.show() else: self.show_sorter_btn.setText(trans._("<< Show Sorter")) self.plugin_sorter.hide() def _toggle_status(self, show): if show: self.show_status_btn.setText(trans._("Hide Status")) self.stdout_text.show() else: self.show_status_btn.setText(trans._("Show Status")) self.stdout_text.hide() def _install_packages(self, packages: Sequence[str] = ()): if not packages: _packages = self.direct_entry_edit.text() if os.path.exists(_packages): packages = [_packages] else: packages = _packages.split() self.direct_entry_edit.clear() if packages: self.installer.install(packages) if __name__ == "__main__": from qtpy.QtWidgets import QApplication app = QApplication([]) w = QtPluginDialog() w.show() app.exec_()
1.765625
2
hata/discord/webhook/utils.py
WizzyBots/hata
1
168
__all__ = ('create_partial_webhook_from_id', )

from scarletio import export

from ..core import USERS

from .preinstanced import WebhookType
from .webhook import Webhook


@export
def create_partial_webhook_from_id(webhook_id, token, *, type_=WebhookType.bot, channel_id=0):
    """
    Creates a partial webhook from the given parameters. If the webhook with the given `webhook_id` already
    exists, then returns that instead.

    Parameters
    ----------
    webhook_id : `int`
        The identifier number of the webhook.
    token : `str`
        The token of the webhook.
    type_ : ``WebhookType`` = `WebhookType.bot`, Optional (Keyword only)
        The webhook's type. Defaults to `WebhookType.bot`.
    channel_id : `int` = `0`, Optional (Keyword only)
        The webhook's channel's identifier. Defaults to `0`.

    Returns
    -------
    webhook : ``Webhook``
    """
    try:
        webhook = USERS[webhook_id]
    except KeyError:
        webhook = Webhook._create_empty(webhook_id)
        webhook.channel_id = channel_id
        webhook.type = type_
        USERS[webhook_id] = webhook

    webhook.token = token
    return webhook
2.515625
3
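A hypothetical usage sketch for create_partial_webhook_from_id() from the record above; the snowflake ids and token are placeholders, and the import paths simply mirror the module location shown in the record, which may differ from hata's public API.

from hata.discord.webhook.utils import create_partial_webhook_from_id
from hata.discord.webhook.preinstanced import WebhookType

# Placeholder identifiers and token, purely for illustration.
webhook = create_partial_webhook_from_id(
    123456789012345678,
    'placeholder-token',
    type_=WebhookType.bot,
    channel_id=876543210987654321,
)
print(webhook)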
MIT/600.1x - Introduction to Computer Science and Programming Using Python/Unit 4/Problem Set 4/get_word_score.py
henriqueumeda/-Python-study
0
169
SCRABBLE_LETTER_VALUES = {
    'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4,
    'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3,
    'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8,
    'y': 4, 'z': 10
}


def getWordScore(word, n):
    """
    Returns the score for a word. Assumes the word is a valid word.

    The score for a word is the sum of the points for letters in the
    word, multiplied by the length of the word, PLUS 50 points if all n
    letters are used on the first turn.

    Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is
    worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES)

    word: string (lowercase letters)
    n: integer (HAND_SIZE; i.e., hand size required for additional points)
    returns: int >= 0
    """
    total_points = 0
    for letter in word:
        total_points += SCRABBLE_LETTER_VALUES[letter]
    total_points *= len(word)
    if len(word) == n:
        total_points += 50
    return total_points


print(getWordScore('waybill', 7))
4.03125
4
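A worked check of the scoring rule in the record above, using the script's own call getWordScore('waybill', 7):

# Letter points for 'waybill': w=4, a=1, y=4, b=3, i=1, l=1, l=1 -> 15.
# 15 * len('waybill') = 15 * 7 = 105; all n=7 letters used -> +50 bonus = 155.
assert 4 + 1 + 4 + 3 + 1 + 1 + 1 == 15
assert 15 * 7 + 50 == 155   # so getWordScore('waybill', 7) returns 155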
autolatex-master/exemplos_codigo/certificados/certificados.py
luizgui05/autolatex.
0
170
<filename>autolatex-master/exemplos_codigo/certificados/certificados.py
import os
import sys
import sqlite3

con = None
filename = 'certificado'

# Open the database to read the names.
try:
    con = sqlite3.connect('math.db')
    cur = con.cursor()
    cur.execute('select * from math')
    data = cur.fetchall()
except sqlite3.Error, e:
    print "Error %s:" % e.args[0]
    sys.exit(1)
finally:
    if con:
        con.close()

# Generate one certificate per name.
for row in data:
    f = open(filename+'.tex','r+')
    old = f.readlines()
    if old[0][1:4] == 'def':
        offset = 1
    else:
        offset = 0
    f.seek(0)
    f.write('\\def\\name {'+row[0]+'}\n')
    f.writelines(old[offset:])
    f.close()

    # Compile the LaTeX file.
    try:
        os.system('pdflatex '+filename+'.tex')
        os.system('mv '+filename+'.pdf '+filename+'_'+row[0].replace(' ','_')+'.pdf')
        #os.system('xdg-open '+filename+'.pdf &')
    except OSError:
        print('LaTeX not installed.')
2.609375
3
nanoepiseg/main_list_chunks.py
snajder-r/nanoepiseg
0
171
<reponame>snajder-r/nanoepiseg
from pathlib import Path

from meth5.meth5 import MetH5File


def main(m5file: Path, chunk_size: int, quiet: bool):
    with MetH5File(m5file, "r", chunk_size=chunk_size) as f:
        for chrom in f.get_chromosomes():
            print(f"{chrom}: {f[chrom].get_number_of_chunks()}")
2.734375
3
qscatv2/make_seasonal_images.py
tmilliman/sir_to_netcdf
0
172
<reponame>tmilliman/sir_to_netcdf #!/usr/bin/env python # script to make seasonal means and stddev images of 4-day sig0 # values. import os import sys import glob import numpy as np import sirpy2 as sp2 import argparse from osgeo import gdal DATADIR = "./" NODATA_VALUE = -9999.0 Q2M = { "JAS": list(range(7, 10)), "OND": list(range(10, 13)), "JFM": list(range(1, 4)), "AMJ": list(range(4, 7)), } # this allows GDAL to throw Python Exceptions gdal.UseExceptions() def db2pr(dbvalue): pr = 10 ** (dbvalue / 10.0) return pr if __name__ == "__main__": # set up arguments parser = argparse.ArgumentParser( "script to make quarterly " + "means and stdevs of qscat dB values" ) parser.add_argument( "-v", "--verbose", help="increase output verbosity", action="store_true", default=False, ) parser.add_argument( "-q", "--quarter", nargs="?", choices=("JAS", "OND", "JFM", "AMJ"), default="JAS", const="JAS", help="Quarter for aggregation. Default=JAS", ) parser.add_argument("region", help="BYU region string (e.g. SAm, NAm, Ama, etc.)") parser.add_argument( "year", type=int, help="Year e.g. 1999 (qscat data start in 1999)" ) args = parser.parse_args() verbose = args.verbose year = args.year quarter = args.quarter # region list (LAEA regions only) valid_region_list = [ "Grn", "Ala", "CAm", "NAm", "SAm", "NAf", "SAf", "Sib", "Eur", "SAs", "ChJ", "Ind", "Aus", "Ber", ] region = args.region try: region_index = valid_region_list.index(region) except Exception: sys.stderr.write("Region not valid.\n") sys.stderr.write("Valid regions are:\n") sys.stderr.write("{}\n".format(valid_region_list)) sys.exit(1) if verbose: print("region: {}".format(region)) print("year: {}".format(year)) print("quarter: {}".format(quarter)) # set data dir indir = os.path.join(DATADIR, "geotiffs", region, str(year)) outdir = indir if year == 1999: year2 = 99 else: year2 = "{:02d}".format(year - 2000) monthlist = Q2M[quarter] # make a list of files for this year filepatt = "quev-a-{}{}-*.tif".format(region, year2) globpatt = os.path.join(indir, filepatt) if verbose: print("glob pattern: {}".format(globpatt)) filelist = glob.glob(globpatt) qlist = [] for filepath in filelist: fn = os.path.basename(filepath) if verbose: print(fn) fn_dt = sp2.fn2dt(fn, date_flag="center") iyear = fn_dt.year imonth = fn_dt.month iday = fn_dt.day if imonth in monthlist: qlist.append(fn) if verbose: print("{}: {}-{}-{}".format(fn, iyear, imonth, iday)) print("{}-{}: {}".format(year, quarter, qlist)) if len(qlist) == 0: warnmsg = "No images found for this quarter.\n" sys.stdout.write(warnmsg) sys.exit(0) # loop over images for this quarter db_quarter = [] for i, image in enumerate(qlist): a_imgpath = os.path.join(indir, image) try: a_ds = gdal.Open(a_imgpath) except Exception: print("Unable to open {}".format(a_imgpath)) sys.exit(1) try: srcband = a_ds.GetRasterBand(1) except Exception: print("Band ({}) not found".format(1)) sys.exit(1) a_data = srcband.ReadAsArray() a_mask = a_data == NODATA_VALUE # if this is the first image get projection and geotransform if i == 0: prj = a_ds.GetProjection() gt = a_ds.GetGeoTransform() ny, nx = a_data.shape db_data = a_data db_masked = np.ma.MaskedArray(db_data, a_mask) # add image to db_quarter list db_quarter.append(db_masked) # close datasets a_ds = None # stack list into array and find mean and std dbarray = np.ma.stack(db_quarter, axis=2) dbmean = np.ma.mean(dbarray, axis=2) dbstd = np.ma.std(dbarray, axis=2) print(dbmean.shape) # finally, save as a geotiff output_format = "GTiff" driver = gdal.GetDriverByName(output_format) 
dst_filename = "{}-quev-mean-db-{}-{}.tif" dst_filename = dst_filename.format(region, year, quarter) dst_dir = os.path.join(DATADIR, "geotiffs", region, str(year)) dst_path = os.path.join(dst_dir, dst_filename) if verbose: print("Output file for sig0 means: {}".format(dst_path)) dst_ds = driver.Create(dst_path, nx, ny, 1, gdal.GDT_Float32) dst_data = np.ma.filled(dbmean, fill_value=NODATA_VALUE) dst_ds.GetRasterBand(1).WriteArray(dst_data) dst_ds.GetRasterBand(1).SetNoDataValue(NODATA_VALUE) print("gt: {}".format(gt)) dst_ds.SetGeoTransform(gt) dst_ds.SetProjection(prj) dst_ds = None dbmean_min = dbmean.min() dbmean_max = dbmean.max() dbmean_median = np.ma.median(dbmean) print("Quarterly ({}) Mean Stats".format(quarter)) print(" Min: {}".format(dbmean_min)) print(" Max: {}".format(dbmean_max)) print(" Median: {}".format(dbmean_median)) # repeat for standard deviation output_format = "GTiff" driver = gdal.GetDriverByName(output_format) dst_filename = "{}-quev-std-db-{}-{}.tif".format(region, year, quarter) dst_dir = os.path.join(DATADIR, "geotiffs", region, str(year)) dst_path = os.path.join(dst_dir, dst_filename) if verbose: print("Output file: {}".format(dst_path)) dst_ds = driver.Create(dst_path, nx, ny, 1, gdal.GDT_Float32) dst_data = np.ma.filled(dbstd, fill_value=NODATA_VALUE) dst_ds.GetRasterBand(1).WriteArray(dst_data) dst_ds.GetRasterBand(1).SetNoDataValue(NODATA_VALUE) print("gt: {}".format(gt)) dst_ds.SetGeoTransform(gt) dst_ds.SetProjection(prj) dst_ds = None dbstd_min = dbstd.min() dbstd_max = dbstd.max() dbstd_median = np.ma.median(dbstd) print("Quarterly ({}) Stdev Stats".format(quarter)) print(" Min: {}".format(dbstd_min)) print(" Max: {}".format(dbstd_max)) print(" Median: {}".format(dbstd_median))
2.453125
2
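The db2pr() helper in the record above converts a backscatter value in decibels to a linear power ratio; a standalone check of that formula (the helper is restated here so the snippet runs on its own):

def db2pr(dbvalue):
    return 10 ** (dbvalue / 10.0)

print(db2pr(0.0))    # 1.0, since 0 dB is unity
print(db2pr(-3.0))   # ~0.501, roughly half power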
ex062.py
noahbarros/Python-Exercises
1
173
primeiro = int(input('Digite o primeiro termo da PA: '))
razão = int(input('Digite a razão da PA: '))
termo = primeiro
cont = 1
total = 0
mais = 10
while mais != 0:
    total += mais
    while cont <= total:
        print(f'{termo} ', end='')
        termo += razão
        cont += 1
    print('Pausa')
    mais = int(input('Quantos termos você quer usar a mais? '))
print(f'a progressão foi finalizada com {total} termos mostrados')
3.953125
4
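A quick cross-check of the loop in the record above against the closed-form n-th term of an arithmetic progression; the first term and common difference below are arbitrary example values.

primeiro, razao = 2, 3                  # example first term and common difference
decimo = primeiro + (10 - 1) * razao    # closed-form 10th term
print(decimo)                           # 29, the 10th value printed by the loop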
arxml_data_extractor/handler/object_handler.py
Brokdar/ArxmlDataExtractor
16
174
<reponame>Brokdar/ArxmlDataExtractor from lxml.etree import Element, QName from typing import Union, List, Any from tqdm import tqdm import logging from arxml_data_extractor.handler import value_handler from arxml_data_extractor.handler.path_handler import PathHandler from arxml_data_extractor.asr.asr_parser import AsrParser from arxml_data_extractor.query.data_query import DataQuery from arxml_data_extractor.query.data_object import DataObject from arxml_data_extractor.query.data_value import DataValue class ObjectHandler(): def __init__(self, parser: AsrParser): self.logger = logging.getLogger() self.path_handler = PathHandler(parser) def handle(self, data_object: DataObject, node: Element = None) -> Union[list, dict]: is_not_root = True if node is None: is_not_root = False node = self.path_handler.parser.root if is_not_root: self.logger.info(f'ObjectHandler - handle DataObject(\'{data_object.name}\')') else: self.logger.info(f'ObjectHandler - [root] handle DataObject(\'{data_object.name}\')') values = [] elements = self.path_handler.elements_by_path(data_object.path, node) for element in tqdm( elements, desc=f'Handle DataObject(\'{data_object.name}\')', disable=is_not_root, bar_format="{desc:<70}{percentage:3.0f}% |{bar:70}| {n_fmt:>4}/{total_fmt}"): if element is not None: self.logger.info( f'ObjectHandler - element found: \'{QName(element).localname}\' at line {element.sourceline - 1}' ) values.append(self.__handle_values(data_object.values, element)) if not values: self.logger.warning( f'ObjectHandler - no values found for DataObject(\'{data_object.name}\')') else: self.logger.info( f'ObjectHandler - values found for DataObject(\'{data_object.name}\'): {len(values)}' ) return values[0] if len(values) == 1 else values def __handle_values(self, values: List[Union[DataValue, DataObject]], node: Element) -> dict: results = {} for value in values: if isinstance(value, DataObject): results[value.name] = self.handle(value, node) elif isinstance(value, DataValue): results[value.name] = self.__handle_value(value.query, node) if results[value.name] is None: self.logger.info( f'ObjectHandler - no value found for DataValue(\'{value.name}\')') else: self.logger.info( f'ObjectHandler - value found: DataValue(\'{value.name}\') = \'{results[value.name]}\'' ) else: error = f'ObjectHandler - invalid value type ({type(value)}). Value must be of type DataObject or DataValue' self.logger.error(error) raise TypeError(error) return results def __handle_value(self, query: DataQuery, node: Element) -> Any: if isinstance(query.path, DataQuery.XPath): if query.path.is_reference: element = self.path_handler.element_by_inline_ref(query.path, node) else: element = self.path_handler.element_by_xpath(query.path.xpath, node) else: # DataQuery.Reference isn't allowed on DataValue return None if element is None: return None return value_handler.handle(query, element)
2.203125
2
src/api/datamanage/pro/lifecycle/data_trace/data_set_create.py
Chromico/bk-base
84
175
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-BASE 蓝鲸基础平台 is licensed under the MIT License. License for BK-BASE 蓝鲸基础平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from copy import deepcopy from datamanage.pro import exceptions as dm_pro_errors from datamanage.utils.api import MetaApi from datamanage.pro.utils.time import utc_to_local, str_to_datetime from datamanage.pro.lifecycle.models_dict import ( DATASET_CREATE_MAPPINGS, DATASET_CREATE_EVENT_INFO_DICT, DataTraceShowType, ComplexSearchBackendType, DataTraceFinishStatus, ) def get_dataset_create_info(dataset_id, dataset_type): """获取数据足迹中和数据创建相关信息 :param dataset_id: 数据id :param dataset_type: 数据类型 :return: 数据创建相关信息 :rtype: list """ # 1)从dgraph中获取数据创建相关信息 data_set_create_info_statement = """ { get_dataset_create_info(func: eq(%s, "%s")){created_by created_at} } """ % ( DATASET_CREATE_MAPPINGS[dataset_type]['data_set_pk'], dataset_id, ) query_result = MetaApi.complex_search( {"backend_type": ComplexSearchBackendType.DGRAPH.value, "statement": data_set_create_info_statement}, raw=True ) create_info_ret = query_result['data']['data']['get_dataset_create_info'] if not (isinstance(create_info_ret, list) and create_info_ret): raise dm_pro_errors.GetDataSetCreateInfoError(message_kv={'dataset_id': dataset_id}) # 2)得到格式化创建信息 create_trace_dict = deepcopy(DATASET_CREATE_EVENT_INFO_DICT) create_trace_dict.update( { "sub_type": dataset_type, "sub_type_alias": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'], "description": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'], "created_at": utc_to_local(create_info_ret[0]['created_at']), "created_by": create_info_ret[0]['created_by'], "show_type": DataTraceShowType.DISPLAY.value, "datetime": str_to_datetime(utc_to_local(create_info_ret[0]['created_at'])), "status": DataTraceFinishStatus.STATUS, "status_alias": DataTraceFinishStatus.STATUS_ALIAS, } ) return [create_trace_dict]
1.164063
1
vectors2.py
shivam13verma/judge-embeddings
0
176
import locale import glob import os import os.path import requests import tarfile import sys import re import gensim from gensim.models.doc2vec import TaggedDocument from collections import namedtuple from gensim.models import Doc2Vec import gensim.models.doc2vec from collections import OrderedDict import multiprocessing from gensim.test.test_doc2vec import ConcatenatedDoc2Vec import pickle reload(sys) sys.setdefaultencoding("utf-8") #dirname = '/scratch/ap4608/judge_data' #locale.setlocale(locale.LC_ALL, 'C') # # ## Convert text to lower-case and strip punctuation/symbols from words #def normalize_text(text): # norm_text = text.lower() # # # Replace breaks with spaces # norm_text = norm_text.replace('<br />', ' ') # # # Pad punctuation with spaces on both sides # for char in ['.', '"', ',', '(', ')', '!', '?', ';', ':']: # norm_text = norm_text.replace(char, ' ' + char + ' ') # # return norm_text # # ## Concat and normalize test/train data #folders = os.listdir(dirname) #alldata = '' # #for fol in folders: # temp = '' # output = fol.replace('/', '-') + '.txt' # # # Is there a better pattern to use? # txt_files = glob.glob('/'.join([dirname, fol, '*.txt'])) # # for txt in txt_files: # with open(txt, 'r') as t: # control_chars = [chr(0x85)] # t_clean = t.read() # # t_clean = t_clean.replace('\n', ' ') # t_clean = re.sub(r'[^\x00-\x7F]+',' ', t_clean) # # for c in control_chars: # t_clean = t_clean.replace(c, ' ') # # temp += t_clean # # temp += "\n" # # temp_norm = normalize_text(temp) # # if len(temp_norm) == 1: # continue # # with open('/'.join([dirname, output]), 'w') as n: # n.write(temp_norm) # # alldata += temp_norm # #with open('/'.join([dirname, 'alldata-id.txt']), 'w') as f: # for idx, line in enumerate(alldata.splitlines()): # num_line = "_*{0} {1}\n".format(idx, line) # f.write(num_line) # #SentimentDocument = namedtuple('SentimentDocument', 'words tags split sentiment') # #alldocs = [] # will hold all docs in original order #with open(os.path.join(dirname, 'alldata-id.txt')) as alldata: # for line_no, line in enumerate(alldata): # tokens = gensim.utils.to_unicode(line).split() # words = tokens[1:] # tags = [line_no] # `tags = [tokens[0]]` would also work at extra memory cost # split = ['train','test','extra','extra'][line_no//25000] # 25k train, 25k test, 25k extra # sentiment = [1.0, 0.0, 1.0, 0.0, None, None, None, None][line_no//12500] # [12.5K pos, 12.5K neg]*2 then unknown # alldocs.append(SentimentDocument(words, tags, split, sentiment)) # #train_docs = [doc for doc in alldocs if doc.split == 'train'] #test_docs = [doc for doc in alldocs if doc.split == 'test'] #doc_list = alldocs[:] # for reshuffling per pass # #cores = multiprocessing.cpu_count() #assert gensim.models.doc2vec.FAST_VERSION > -1, "this will be painfully slow otherwise" # #simple_models = [ # # PV-DM w/concatenation - window=5 (both sides) approximates paper's 10-word total window size # Doc2Vec(dm=1, dm_concat=1, size=100, window=5, negative=5, hs=0, min_count=2, workers=cores), # # PV-DBOW # Doc2Vec(dm=0, size=100, negative=5, hs=0, min_count=2, workers=cores), # # PV-DM w/average # Doc2Vec(dm=1, dm_mean=1, size=100, window=10, negative=5, hs=0, min_count=2, workers=cores), #] # ## speed setup by sharing results of 1st model's vocabulary scan #simple_models[0].build_vocab(alldocs) # PV-DM/concat requires one special NULL word so it serves as template #for model in simple_models[1:]: # model.reset_from(simple_models[0]) # #models_by_name = OrderedDict((str(model), model) for model in simple_models) # 
#models_by_name['dbow+dmm'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[2]]) #models_by_name['dbow+dmc'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[0]]) # ## Create a document vector list and save it #doc_vec_list = [x.docvecs for x in simple_models] docvecs = pickle.load(open('docvecs.p', 'rb')) print len(docvecs) print len(docvecs[0]) print docvecs[0] for i,x in enumerate(docvecs): with open('docvecs_'+str(i)+'.txt','w') as f: for vec in x: f.write(vec) f.write("\n") # pickle.dump(models_by_name, open('model.p', 'wb'))
2.109375
2
caravan_search_engine/test/test_task.py
crest-cassia/caravan
4
177
<gh_stars>1-10
import unittest

from caravan.task import Task
from caravan.tables import Tables


class TestRun(unittest.TestCase):

    def setUp(self):
        self.t = Tables.get()
        self.t.clear()

    def test_task(self):
        t = Task(1234, "echo hello world")
        self.assertEqual(t.id(), 1234)
        self.assertEqual(t.is_finished(), False)
        self.assertEqual(t.command(), "echo hello world")
        t._store_result([1.0, 2.0, 3.0], 0, 3, 111, 222)
        self.assertTrue(t.is_finished())
        self.assertEqual(t.rc(), 0)
        self.assertEqual(t.rank(), 3)
        self.assertEqual(t.start_at(), 111)
        self.assertEqual(t.finish_at(), 222)

    def test_create(self):
        for i in range(10):
            t = Task.create("echo %d" % i)
            self.assertEqual(t.id(), i)
            self.assertEqual(t.is_finished(), False)
        self.assertEqual(len(Task.all()), 10)

    def test_all(self):
        tasks = [Task.create("echo %d" % i) for i in range(10)]
        self.assertEqual(Task.all(), tasks)

    def test_find(self):
        tasks = [Task.create("echo %d" % i) for i in range(10)]
        self.assertEqual(Task.find(5).id(), 5)
        self.assertEqual(Task.find(5), tasks[5])


if __name__ == '__main__':
    unittest.main()
2.6875
3
splash/render_options.py
tashidexiaoL/splashnew
3,612
178
<reponame>tashidexiaoL/splashnew # -*- coding: utf-8 -*- import os import json from splash import defaults from splash.utils import to_bytes, path_join_secure from splash.errors import BadOption class RenderOptions(object): """ Options that control how to render a response. """ _REQUIRED = object() def __init__(self, data, max_timeout): self.data = data self.max_timeout = max_timeout @classmethod def raise_error(cls, argument, description, type='bad_argument', **kwargs): params = { 'type': type, 'argument': argument, 'description': description } params.update(kwargs) raise BadOption(params) @classmethod def fromrequest(cls, request, max_timeout): """ Initialize options from a Twisted Request. """ # 1. GET / POST data data = {key.decode('utf-8'): values[0].decode('utf-8') for key, values in request.args.items()} if request.method == b'POST': content_type = request.getHeader(b'content-type') if content_type: request.content.seek(0) # 2. application/json POST data if b'application/json' in content_type: try: content = request.content.read().decode('utf-8') data.update(json.loads(content)) except ValueError as e: raise BadOption({ 'type': 'invalid_json', 'description': "Can't decode JSON", 'message': str(e), }) # 3. js_source from application/javascript POST requests if b'application/javascript' in content_type: data['js_source'] = request.content.read().decode('utf-8') request.content.seek(0) data['uid'] = id(request) return cls(data, max_timeout) def get_expired_args(self, cache): """ Return a list of argument names from load_args which can't be loaded """ return cache.get_missing(self.get_load_args().items()) def save_args_to_cache(self, cache): """ Process save_args and put all values to cache. Return a list of (name, key) pairs. """ save_args = self.get_save_args() save_values = [self.data.get(name) for name in save_args] keys = cache.add_many(save_values) return list(zip(save_args, keys)) def load_cached_args(self, cache): load_args = self.get_load_args() for name, key in (load_args or {}).items(): self.data[name] = cache[key] def get(self, name, default=_REQUIRED, type=str, range=None): value = self.data.get(name) if value is not None: if type is not None: try: value = type(value) except ValueError: msg = "Argument %r has a wrong type" % (name,) self.raise_error(name, msg, required_type=type.__name__) if range is not None and not (range[0] <= value <= range[1]): self.raise_error(name, 'Argument is out of the allowed range', min=range[0], max=range[1], value=value) return value elif default is self._REQUIRED: self.raise_error(name, 'Required argument is missing: %s' % name, type='argument_required') else: return default def _get_bool(self, name, default=_REQUIRED): return self.get(name, default, type=int, range=(0, 1)) def _get_url(self, name, default=_REQUIRED): url = self.get(name, default, type=None) if isinstance(url, bytes): url = url.decode('utf8') return url def get_uid(self): return self.get('uid') def get_url(self): return self._get_url("url") def get_baseurl(self): return self._get_url("baseurl", default=None) def get_wait(self): return self.get("wait", defaults.WAIT_TIME, type=float, range=(0, self.get_timeout())) def get_timeout(self): default = min(self.max_timeout, defaults.TIMEOUT) return self.get("timeout", default, type=float, range=(0, self.max_timeout)) def get_resource_timeout(self): return self.get("resource_timeout", defaults.RESOURCE_TIMEOUT, type=float, range=(0, 1e6)) def get_response_body(self): return self._get_bool("response_body", 
defaults.RESPONSE_BODY_ENABLED) def get_request_body(self): return self._get_bool("request_body", defaults.REQUEST_BODY_ENABLED) def get_images(self): return self._get_bool("images", defaults.AUTOLOAD_IMAGES) def get_proxy(self): return self.get("proxy", default=None) def get_js_source(self): return self.get("js_source", default=None) def get_width(self): return self.get("width", None, type=int, range=(1, defaults.MAX_WIDTH)) def get_height(self): return self.get("height", None, type=int, range=(1, defaults.MAX_HEIGTH)) def get_scale_method(self): scale_method = self.get("scale_method", defaults.IMAGE_SCALE_METHOD) allowed_scale_methods = ['raster', 'vector'] if scale_method not in allowed_scale_methods: self.raise_error( argument='scale_method', description="Invalid 'scale_method': %s" % scale_method, allowed=allowed_scale_methods, received=scale_method, ) return scale_method def get_quality(self): return self.get("quality", defaults.JPEG_QUALITY, type=int, range=(0, 100)) def get_http_method(self): method = self.get("http_method", "GET") if method.upper() not in ["POST", "GET"]: self.raise_error("http_method", "Unsupported HTTP method {}".format(method)) return method def get_body(self): body = self.get("body", None, to_bytes) method = self.get("http_method", "GET").upper() if method == 'GET' and body: self.raise_error("body", "GET request should not have a body") return body def get_render_all(self, wait=None): result = self._get_bool("render_all", False) if result == 1 and wait == 0: self.raise_error("render_all", "Pass non-zero 'wait' to render full webpage") return result def get_lua_source(self): return self.get("lua_source") def get_js_profile(self, js_profiles_path): js_profile = self.get("js", default=None) if not js_profile: return js_profile if js_profiles_path is None: self.raise_error('js', 'Javascript profiles are not enabled on server') try: profile_dir = path_join_secure(js_profiles_path, js_profile) except ValueError as e: # security check fails print(e) self.raise_error('js', 'Javascript profile does not exist') if not os.path.isdir(profile_dir): self.raise_error('js', 'Javascript profile does not exist') return profile_dir def get_headers(self): headers = self.get("headers", default=None, type=None) if headers is None: return headers if not isinstance(headers, (list, tuple, dict)): self.raise_error( argument='headers', description="'headers' must be either a JSON array of " "(name, value) pairs or a JSON object" ) if isinstance(headers, (list, tuple)): for el in headers: string_only = all(isinstance(e, str) for e in el) if not (isinstance(el, (list, tuple)) and len(el) == 2 and string_only): self.raise_error( argument='headers', description="'headers' must be either a JSON array of " "(name, value) pairs or a JSON object" ) return headers def get_save_args(self): save_args = self.get("save_args", default=None, type=None) if save_args is None: return [] if isinstance(save_args, str): # comma-separated string save_args = save_args.split(',') if not isinstance(save_args, list): self.raise_error( argument="save_args", description="'save_args' should be either a comma-separated " "string or a JSON array with argument names", ) # JSON array if not all(isinstance(a, str) for a in save_args): self.raise_error( argument="save_args", description="'save_args' should be a list of strings", ) return save_args def get_load_args(self): load_args = self.get("load_args", default=None, type=None) if load_args is None: return {} if isinstance(load_args, str): try: load_args = dict( 
kv.split("=", 1) for kv in load_args.split(';') ) except ValueError: self.raise_error( argument="load_args", description="'load_args' string value is not a " "semicolon-separated list of name=hash pairs" ) if not isinstance(load_args, dict): self.raise_error( argument="load_args", description="'load_args' should be either a JSON object with " "argument hashes or a semicolon-separated list " "of name=hash pairs" ) return load_args def get_viewport(self, wait=None): viewport = self.get("viewport", defaults.VIEWPORT_SIZE) if viewport == 'full': if wait == 0: self.raise_error("viewport", "Pass non-zero 'wait' to render full webpage") else: try: validate_size_str(viewport) except ValueError as e: self.raise_error("viewport", str(e)) return viewport def get_filters(self, pool=None, adblock_rules=None): filter_names = self.get('filters', '') filter_names = [f for f in filter_names.split(',') if f] if pool is None and adblock_rules is None: # skip validation return filter_names if not filter_names: return filter_names if pool is not None: adblock_rules = pool.network_manager_factory.adblock_rules if adblock_rules is None: self.raise_error( "filters", "Invalid filter names: %s" % (filter_names,) ) if adblock_rules is not None: unknown_filters = adblock_rules.get_unknown_filters(filter_names) if unknown_filters: self.raise_error( "filters", "Invalid filter names: %s" % (unknown_filters,) ) return filter_names def get_allowed_domains(self): allowed_domains = self.get("allowed_domains", default=None) if allowed_domains is not None: return allowed_domains.split(',') def get_allowed_content_types(self): content_types = self.get("allowed_content_types", default=['*']) if isinstance(content_types, str): content_types = list(filter(None, content_types.split(','))) return content_types def get_forbidden_content_types(self): content_types = self.get("forbidden_content_types", default=[]) if isinstance(content_types, str): content_types = list(filter(None, content_types.split(','))) return content_types def get_html5_media(self): return self._get_bool("html5_media", defaults.HTML5_MEDIA_ENABLED) def get_engine(self, browser_engines_enabled=None): engine = self.get("engine", default="webkit", type=str) if engine not in {"webkit", "chromium"}: self.raise_error("engine", "Unknown render engine {}".format(engine)) if browser_engines_enabled is not None: if engine not in browser_engines_enabled: self.raise_error("engine", "Disabled render engine {}".format(engine)) return engine def get_http2(self): engine = self.get_engine() if self.get_engine() == "webkit": default = defaults.WEBKIT_HTTP2_ENABLED else: assert engine == 'chromium' default = defaults.CHROMIUM_HTTP2_ENABLED return self._get_bool("http2", default) def get_common_params(self, js_profiles_path): wait = self.get_wait() return { 'url': self.get_url(), 'baseurl': self.get_baseurl(), 'wait': wait, 'resource_timeout': self.get_resource_timeout(), 'viewport': self.get_viewport(wait), 'render_all': self.get_render_all(wait), 'images': self.get_images(), 'headers': self.get_headers(), 'proxy': self.get_proxy(), 'js_profile': self.get_js_profile(js_profiles_path), 'js_source': self.get_js_source(), 'http_method': self.get_http_method(), 'body': self.get_body(), 'html5_media': self.get_html5_media(), 'http2': self.get_http2(), # 'lua': self.get_lua(), } def get_image_params(self): return { 'width': self.get_width(), 'height': self.get_height(), 'scale_method': self.get_scale_method() } def get_png_params(self): return self.get_image_params() def 
get_jpeg_params(self): params = {'quality': self.get_quality()} params.update(self.get_image_params()) return params def get_include_params(self): return dict( html=self._get_bool("html", defaults.DO_HTML), iframes=self._get_bool("iframes", defaults.DO_IFRAMES), png=self._get_bool("png", defaults.DO_PNG), jpeg=self._get_bool("jpeg", defaults.DO_JPEG), script=self._get_bool("script", defaults.SHOW_SCRIPT), console=self._get_bool("console", defaults.SHOW_CONSOLE), history=self._get_bool("history", defaults.SHOW_HISTORY), har=self._get_bool("har", defaults.SHOW_HAR), ) def validate_size_str(size_str): """ Validate size string in WxH format. Can be used to validate both viewport and window size strings. Does not special-case ``'full'`` viewport. Raises ``ValueError`` if anything goes wrong. :param size_str: string to validate """ max_width = defaults.VIEWPORT_MAX_WIDTH max_heigth = defaults.VIEWPORT_MAX_HEIGTH max_area = defaults.VIEWPORT_MAX_AREA try: w, h = map(int, size_str.split('x')) except ValueError: raise ValueError("Invalid viewport format: %s" % size_str) else: if not ((0 < w <= max_width) and (0 < h <= max_heigth) and (w * h < max_area)): raise ValueError("Viewport (%dx%d, area=%d) is out of range (%dx%d, area=%d)" % (w, h, w * h, max_width, max_heigth, max_area))
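A minimal usage sketch for the RenderOptions class above, exercising only its argument-parsing helpers (no Twisted request involved). The import path splash.render_options and the example argument values are assumptions, not taken from this record:

# Hypothetical, self-contained usage sketch for RenderOptions: feed a plain
# dict of request arguments and read back validated, typed values.
from splash.render_options import RenderOptions  # assumed import path
from splash.errors import BadOption

options = RenderOptions(
    data={'url': 'http://example.com', 'wait': '0.5', 'width': '320'},
    max_timeout=90,
)

print(options.get_url())    # 'http://example.com'
print(options.get_wait())   # 0.5, validated against the timeout range
print(options.get_width())  # 320, capped at defaults.MAX_WIDTH

try:
    RenderOptions(data={}, max_timeout=90).get_url()
except BadOption as exc:    # a missing required argument raises BadOption
    print(exc)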
2.34375
2
syntax/func.py
sangumee/Opentutorials-Webn-Python
0
179
<filename>syntax/func.py
# code....
a = 1
b = 2
c = 3
s = a+b+c
r = s/3
print(r)

# code....
'''
def average():
    a=1
    b=2
    c=3
    s=a+b+c
    r=s/3
    print(r)
average()
'''

'''
#input
#parameter
#argument
def average(a,b,c):
    s=a+b+c
    r=s/3
    print(r)
average(10,20,30)
'''


def average(a, b, c):
    s = a+b+c
    r = s/3
    return r


print(average(10, 20, 30))
3.625
4
tools/evolution/codingSnps_filter.py
ramezrawas/galaxy-1
1
180
#!/usr/bin/env python
# runs after the job (and after the default post-filter)
from galaxy.tools.parameters import DataToolParameter

# Older py compatibility
try:
    set()
except:
    from sets import Set as set


def validate_input( trans, error_map, param_values, page_param_map ):
    dbkeys = set()
    data_param_names = set()
    data_params = 0
    for name, param in page_param_map.items():
        if isinstance( param, DataToolParameter ):
            # for each dataset parameter
            if param_values.get(name, None) is not None:
                dbkeys.add( param_values[name].dbkey )
            data_params += 1
            # check meta data
            try:
                param = param_values[name]
                int( param.metadata.startCol )
                int( param.metadata.endCol )
                int( param.metadata.chromCol )
                if param.metadata.strandCol is not None:
                    int( param.metadata.strandCol )
            except:
                error_msg = ("The attributes of this dataset are not properly set. "
                             "Click the pencil icon in the history item to set the chrom, start, end and strand columns.")
                error_map[name] = error_msg
            data_param_names.add( name )
    if len( dbkeys ) > 1:
        for name in data_param_names:
            error_map[name] = "All datasets must belong to same genomic build, " \
                              "this dataset is linked to build '%s'" % param_values[name].dbkey
    if data_params != len(data_param_names):
        for name in data_param_names:
            error_map[name] = "A dataset of the appropriate type is required"
2.328125
2
qa/rpc-tests/listtransactions.py
DeftNerd/bitcoinclassic
8
181
#!/usr/bin/env python2 # Copyright (c) 2014-2015 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # Exercise the listtransactions API from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * def check_array_result(object_array, to_match, expected): """ Pass in array of JSON objects, a dictionary with key/value pairs to match against, and another dictionary with expected key/value pairs. """ num_matched = 0 for item in object_array: all_match = True for key,value in to_match.items(): if item[key] != value: all_match = False if not all_match: continue for key,value in expected.items(): if item[key] != value: raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value))) num_matched = num_matched+1 if num_matched == 0: raise AssertionError("No objects matched %s"%(str(to_match))) class ListTransactionsTest(BitcoinTestFramework): def run_test(self): # Simple send, 0 to 1: txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() check_array_result(self.nodes[0].listtransactions(), {"txid":txid}, {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0}) check_array_result(self.nodes[1].listtransactions(), {"txid":txid}, {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0}) # mine a block, confirmations should change: self.nodes[0].generate(1) self.sync_all() check_array_result(self.nodes[0].listtransactions(), {"txid":txid}, {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1}) check_array_result(self.nodes[1].listtransactions(), {"txid":txid}, {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1}) # send-to-self: txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) check_array_result(self.nodes[0].listtransactions(), {"txid":txid, "category":"send"}, {"amount":Decimal("-0.2")}) check_array_result(self.nodes[0].listtransactions(), {"txid":txid, "category":"receive"}, {"amount":Decimal("0.2")}) # sendmany from node1: twice to self, twice to node2: send_to = { self.nodes[0].getnewaddress() : 0.11, self.nodes[1].getnewaddress() : 0.22, self.nodes[0].getaccountaddress("from1") : 0.33, self.nodes[1].getaccountaddress("toself") : 0.44 } txid = self.nodes[1].sendmany("", send_to) self.sync_all() check_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.11")}, {"txid":txid} ) check_array_result(self.nodes[0].listtransactions(), {"category":"receive","amount":Decimal("0.11")}, {"txid":txid} ) check_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.22")}, {"txid":txid} ) check_array_result(self.nodes[1].listtransactions(), {"category":"receive","amount":Decimal("0.22")}, {"txid":txid} ) check_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.33")}, {"txid":txid} ) check_array_result(self.nodes[0].listtransactions(), {"category":"receive","amount":Decimal("0.33")}, {"txid":txid, "account" : "from1"} ) check_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.44")}, {"txid":txid, "account" : ""} ) check_array_result(self.nodes[1].listtransactions(), {"category":"receive","amount":Decimal("0.44")}, {"txid":txid, "account" : "toself"} ) multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()]) self.nodes[0].importaddress(multisig["redeemScript"], 
"watchonly", False, True) txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1) self.nodes[1].generate(1) self.sync_all() assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0) check_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True), {"category":"receive","amount":Decimal("0.1")}, {"txid":txid, "account" : "watchonly"} ) if __name__ == '__main__': ListTransactionsTest().main()
2.34375
2
salt/modules/mount.py
aletourneau/salt
0
182
# -*- coding: utf-8 -*- ''' Salt module to manage unix mounts and the fstab file ''' from __future__ import absolute_import # Import python libs import os import re import logging # Import salt libs import salt.utils from salt._compat import string_types from salt.utils import which as _which from salt.exceptions import CommandNotFoundError, CommandExecutionError # Set up logger log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mount' def __virtual__(): ''' Only load on POSIX-like systems ''' # Disable on Windows, a specific file module exists: if salt.utils.is_windows(): return False return True def _list_mounts(): ret = {} if __grains__['os'] in ['MacOS', 'Darwin']: mounts = __salt__['cmd.run_stdout']('mount') else: mounts = __salt__['cmd.run_stdout']('mount -l') for line in mounts.split('\n'): comps = re.sub(r"\s+", " ", line).split() ret[comps[2]] = comps[0] return ret def _active_mountinfo(ret): _list = _list_mounts() filename = '/proc/self/mountinfo' if not os.access(filename, os.R_OK): msg = 'File not readable {0}' raise CommandExecutionError(msg.format(filename)) blkid_info = __salt__['disk.blkid']() with salt.utils.fopen(filename) as ifile: for line in ifile: comps = line.split() device = comps[2].split(':') device_name = comps[8] device_uuid = None if device_name: device_uuid = blkid_info.get(device_name, {}).get('UUID') device_uuid = device_uuid and device_uuid.lower() ret[comps[4]] = {'mountid': comps[0], 'parentid': comps[1], 'major': device[0], 'minor': device[1], 'root': comps[3], 'opts': comps[5].split(','), 'fstype': comps[7], 'device': device_name, 'alt_device': _list.get(comps[4], None), 'superopts': comps[9].split(','), 'device_uuid': device_uuid} return ret def _active_mounts(ret): ''' List active mounts on Linux systems ''' _list = _list_mounts() filename = '/proc/self/mounts' if not os.access(filename, os.R_OK): msg = 'File not readable {0}' raise CommandExecutionError(msg.format(filename)) with salt.utils.fopen(filename) as ifile: for line in ifile: comps = line.split() ret[comps[1]] = {'device': comps[0], 'alt_device': _list.get(comps[1], None), 'fstype': comps[2], 'opts': comps[3].split(',')} return ret def _active_mounts_freebsd(ret): ''' List active mounts on FreeBSD systems ''' for line in __salt__['cmd.run_stdout']('mount -p').split('\n'): comps = re.sub(r"\s+", " ", line).split() ret[comps[1]] = {'device': comps[0], 'fstype': comps[2], 'opts': comps[3].split(',')} return ret def _active_mounts_solaris(ret): ''' List active mounts on Solaris systems ''' for line in __salt__['cmd.run_stdout']('mount -v').split('\n'): comps = re.sub(r"\s+", " ", line).split() ret[comps[2]] = {'device': comps[0], 'fstype': comps[4], 'opts': comps[5].split('/')} return ret def _active_mounts_openbsd(ret): ''' List active mounts on OpenBSD systems ''' for line in __salt__['cmd.run_stdout']('mount -v').split('\n'): comps = re.sub(r"\s+", " ", line).split() nod = __salt__['cmd.run_stdout']('ls -l {0}'.format(comps[0])) nod = ' '.join(nod.split()).split(" ") parens = re.findall(r'\((.*?)\)', line, re.DOTALL) ret[comps[3]] = {'device': comps[0], 'fstype': comps[5], 'opts': parens[1].split(", "), 'major': str(nod[4].strip(",")), 'minor': str(nod[5]), 'device_uuid': parens[0]} return ret def _active_mounts_darwin(ret): ''' List active mounts on Mac OS systems ''' for line in __salt__['cmd.run_stdout']('mount').split('\n'): comps = re.sub(r"\s+", " ", line).split() parens = re.findall(r'\((.*?)\)', line, re.DOTALL)[0].split(", ") ret[comps[2]] = 
{'device': comps[0], 'fstype': parens[0], 'opts': parens[1:]} return ret def active(extended=False): ''' List the active mounts. CLI Example: .. code-block:: bash salt '*' mount.active ''' ret = {} if __grains__['os'] == 'FreeBSD': _active_mounts_freebsd(ret) elif __grains__['os'] == 'Solaris': _active_mounts_solaris(ret) elif __grains__['os'] == 'OpenBSD': _active_mounts_openbsd(ret) elif __grains__['os'] in ['MacOS', 'Darwin']: _active_mounts_darwin(ret) else: if extended: try: _active_mountinfo(ret) except CommandExecutionError: _active_mounts(ret) else: _active_mounts(ret) return ret def fstab(config='/etc/fstab'): ''' List the contents of the fstab CLI Example: .. code-block:: bash salt '*' mount.fstab ''' ret = {} if not os.path.isfile(config): return ret with salt.utils.fopen(config) as ifile: for line in ifile: if line.startswith('#'): # Commented continue if not line.strip(): # Blank line continue comps = line.split() if len(comps) != 6: # Invalid entry continue ret[comps[1]] = {'device': comps[0], 'fstype': comps[2], 'opts': comps[3].split(','), 'dump': comps[4], 'pass': comps[5]} return ret def rm_fstab(name, device, config='/etc/fstab'): ''' Remove the mount point from the fstab CLI Example: .. code-block:: bash salt '*' mount.rm_fstab /mnt/foo ''' contents = fstab(config) if name not in contents: return True # The entry is present, get rid of it lines = [] try: with salt.utils.fopen(config, 'r') as ifile: for line in ifile: if line.startswith('#'): # Commented lines.append(line) continue if not line.strip(): # Blank line lines.append(line) continue comps = line.split() if len(comps) != 6: # Invalid entry lines.append(line) continue comps = line.split() if device: if comps[1] == name and comps[0] == device: continue else: if comps[1] == name: continue lines.append(line) except (IOError, OSError) as exc: msg = "Couldn't read from {0}: {1}" raise CommandExecutionError(msg.format(config, str(exc))) try: with salt.utils.fopen(config, 'w+') as ofile: ofile.writelines(lines) except (IOError, OSError) as exc: msg = "Couldn't write to {0}: {1}" raise CommandExecutionError(msg.format(config, str(exc))) return True def set_fstab( name, device, fstype, opts='defaults', dump=0, pass_num=0, config='/etc/fstab', test=False, **kwargs): ''' Verify that this mount is represented in the fstab, change the mount to match the data passed, or add the mount if it is not present. CLI Example: .. 
code-block:: bash salt '*' mount.set_fstab /mnt/foo /dev/sdz1 ext4 ''' # Fix the opts type if it is a list if isinstance(opts, list): opts = ','.join(opts) lines = [] change = False present = False if not os.path.isfile(config): raise CommandExecutionError('Bad config file "{0}"'.format(config)) try: with salt.utils.fopen(config, 'r') as ifile: for line in ifile: if line.startswith('#'): # Commented lines.append(line) continue if not line.strip(): # Blank line lines.append(line) continue comps = line.split() if len(comps) != 6: # Invalid entry lines.append(line) continue if comps[1] == name or comps[0] == device: # check to see if there are changes # and fix them if there are any present = True if comps[0] != device: change = True comps[0] = device if comps[1] != name: change = True comps[1] = name if comps[2] != fstype: change = True comps[2] = fstype if comps[3] != opts: change = True comps[3] = opts if comps[4] != str(dump): change = True comps[4] = str(dump) if comps[5] != str(pass_num): change = True comps[5] = str(pass_num) if change: log.debug( 'fstab entry for mount point {0} needs to be ' 'updated'.format(name) ) newline = ( '{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format( device, name, fstype, opts, dump, pass_num ) ) lines.append(newline) else: lines.append(line) except (IOError, OSError) as exc: msg = 'Couldn\'t read from {0}: {1}' raise CommandExecutionError(msg.format(config, str(exc))) if change: if not salt.utils.test_mode(test=test, **kwargs): try: with salt.utils.fopen(config, 'w+') as ofile: # The line was changed, commit it! ofile.writelines(lines) except (IOError, OSError): msg = 'File not writable {0}' raise CommandExecutionError(msg.format(config)) return 'change' if not change: if present: # The right entry is already here return 'present' else: if not salt.utils.test_mode(test=test, **kwargs): # The entry is new, add it to the end of the fstab newline = '{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format(device, name, fstype, opts, dump, pass_num) lines.append(newline) try: with salt.utils.fopen(config, 'w+') as ofile: # The line was changed, commit it! ofile.writelines(lines) except (IOError, OSError): raise CommandExecutionError( 'File not writable {0}'.format( config ) ) return 'new' def rm_automaster(name, device, config='/etc/auto_salt'): ''' Remove the mount point from the auto_master CLI Example: .. code-block:: bash salt '*' mount.rm_automaster /mnt/foo ''' contents = automaster(config) if name not in contents: return True # The entry is present, get rid of it lines = [] try: with salt.utils.fopen(config, 'r') as ifile: for line in ifile: if line.startswith('#'): # Commented lines.append(line) continue if not line.strip(): # Blank line lines.append(line) continue comps = line.split() if len(comps) != 3: # Invalid entry lines.append(line) continue comps = line.split() prefix = "/.." 
name_chk = comps[0].replace(prefix, "") device_fmt = comps[2].split(":") if device: if name_chk == name and device_fmt[1] == device: continue else: if name_chk == name: continue lines.append(line) except (IOError, OSError) as exc: msg = "Couldn't read from {0}: {1}" raise CommandExecutionError(msg.format(config, str(exc))) try: with salt.utils.fopen(config, 'w+') as ofile: ofile.writelines(lines) except (IOError, OSError) as exc: msg = "Couldn't write to {0}: {1}" raise CommandExecutionError(msg.format(config, str(exc))) # Update automount __salt__['cmd.run']('automount -cv') return True def set_automaster( name, device, fstype, opts='', config='/etc/auto_salt', test=False, **kwargs): ''' Verify that this mount is represented in the auto_salt, change the mount to match the data passed, or add the mount if it is not present. CLI Example: .. code-block:: bash salt '*' mount.set_automaster /mnt/foo /dev/sdz1 ext4 ''' # Fix the opts type if it is a list if isinstance(opts, list): opts = ','.join(opts) lines = [] change = False present = False automaster_file = "/etc/auto_master" if not os.path.isfile(config): __salt__['file.touch'](config) __salt__['file.append'](automaster_file, "/-\t\t\t{0}".format(config)) name = "/..{0}".format(name) device_fmt = "{0}:{1}".format(fstype, device) type_opts = "-fstype={0},{1}".format(fstype, opts) if fstype == 'smbfs': device_fmt = device_fmt.replace(fstype, "") try: with salt.utils.fopen(config, 'r') as ifile: for line in ifile: if line.startswith('#'): # Commented lines.append(line) continue if not line.strip(): # Blank line lines.append(line) continue comps = line.split() if len(comps) != 3: # Invalid entry lines.append(line) continue if comps[0] == name or comps[2] == device_fmt: # check to see if there are changes # and fix them if there are any present = True if comps[0] != name: change = True comps[0] = name if comps[1] != type_opts: change = True comps[1] = type_opts if comps[2] != device_fmt: change = True comps[2] = device_fmt if change: log.debug( 'auto_master entry for mount point {0} needs to be ' 'updated'.format(name) ) newline = ( '{0}\t{1}\t{2}\n'.format( name, type_opts, device_fmt) ) lines.append(newline) else: lines.append(line) except (IOError, OSError) as exc: msg = 'Couldn\'t read from {0}: {1}' raise CommandExecutionError(msg.format(config, str(exc))) if change: if not salt.utils.test_mode(test=test, **kwargs): try: with salt.utils.fopen(config, 'w+') as ofile: # The line was changed, commit it! ofile.writelines(lines) except (IOError, OSError): msg = 'File not writable {0}' raise CommandExecutionError(msg.format(config)) return 'change' if not change: if present: # The right entry is already here return 'present' else: if not salt.utils.test_mode(test=test, **kwargs): # The entry is new, add it to the end of the fstab newline = ( '{0}\t{1}\t{2}\n'.format( name, type_opts, device_fmt) ) lines.append(newline) try: with salt.utils.fopen(config, 'w+') as ofile: # The line was changed, commit it! ofile.writelines(lines) except (IOError, OSError): raise CommandExecutionError( 'File not writable {0}'.format( config ) ) return 'new' def automaster(config='/etc/auto_salt'): ''' List the contents of the fstab CLI Example: .. code-block:: bash salt '*' mount.fstab ''' ret = {} if not os.path.isfile(config): return ret with salt.utils.fopen(config) as ifile: for line in ifile: if line.startswith('#'): # Commented continue if not line.strip(): # Blank line continue comps = line.split() if len(comps) != 3: # Invalid entry continue prefix = "/.." 
name = comps[0].replace(prefix, "") device_fmt = comps[2].split(":") opts = comps[1].split(',') ret[name] = {'device': device_fmt[1], 'fstype': opts[0], 'opts': opts[1:]} return ret def mount(name, device, mkmnt=False, fstype='', opts='defaults', user=None): ''' Mount a device CLI Example: .. code-block:: bash salt '*' mount.mount /mnt/foo /dev/sdz1 True ''' # Darwin doesn't expect defaults when mounting without other options if 'defaults' in opts and __grains__['os'] in ['MacOS', 'Darwin']: opts = None if isinstance(opts, string_types): opts = opts.split(',') if not os.path.exists(name) and mkmnt: __salt__['file.mkdir'](name=name, user=user) args = '' if opts is not None: lopts = ','.join(opts) args = '-o {0}'.format(lopts) if fstype: args += ' -t {0}'.format(fstype) cmd = 'mount {0} {1} {2} '.format(args, device, name) out = __salt__['cmd.run_all'](cmd, runas=user) if out['retcode']: return out['stderr'] return True def remount(name, device, mkmnt=False, fstype='', opts='defaults', user=None): ''' Attempt to remount a device, if the device is not already mounted, mount is called CLI Example: .. code-block:: bash salt '*' mount.remount /mnt/foo /dev/sdz1 True ''' force_mount = False if __grains__['os'] in ['MacOS', 'Darwin']: if opts == 'defaults': opts = 'noowners' if fstype == 'smbfs': force_mount = True if isinstance(opts, string_types): opts = opts.split(',') mnts = active() if name in mnts: # The mount point is mounted, attempt to remount it with the given data if 'remount' not in opts and __grains__['os'] not in ['OpenBSD', 'MacOS', 'Darwin']: opts.append('remount') if force_mount: # We need to force the mount but first we should unmount umount(name, device, user=user) lopts = ','.join(opts) args = '-o {0}'.format(lopts) if fstype: args += ' -t {0}'.format(fstype) if __grains__['os'] not in ['OpenBSD', 'MacOS', 'Darwin'] or force_mount: cmd = 'mount {0} {1} {2} '.format(args, device, name) else: cmd = 'mount -u {0} {1} {2} '.format(args, device, name) out = __salt__['cmd.run_all'](cmd, runas=user) if out['retcode']: return out['stderr'] return True # Mount a filesystem that isn't already return mount(name, device, mkmnt, fstype, opts, user=user) def umount(name, device=None, user=None): ''' Attempt to unmount a device by specifying the directory it is mounted on CLI Example: .. code-block:: bash salt '*' mount.umount /mnt/foo .. versionadded:: Lithium salt '*' mount.umount /mnt/foo /dev/xvdc1 ''' mnts = active() if name not in mnts: return "{0} does not have anything mounted".format(name) if not device: cmd = 'umount {0}'.format(name) else: cmd = 'umount {0}'.format(device) out = __salt__['cmd.run_all'](cmd, runas=user) if out['retcode']: return out['stderr'] return True def is_fuse_exec(cmd): ''' Returns true if the command passed is a fuse mountable application. CLI Example: .. code-block:: bash salt '*' mount.is_fuse_exec sshfs ''' cmd_path = _which(cmd) # No point in running ldd on a command that doesn't exist if not cmd_path: return False elif not _which('ldd'): raise CommandNotFoundError('ldd') out = __salt__['cmd.run']('ldd {0}'.format(cmd_path)) return 'libfuse' in out def swaps(): ''' Return a dict containing information on active swap CLI Example: .. 
code-block:: bash salt '*' mount.swaps ''' ret = {} if __grains__['os'] != 'OpenBSD': with salt.utils.fopen('/proc/swaps') as fp_: for line in fp_: if line.startswith('Filename'): continue comps = line.split() ret[comps[0]] = {'type': comps[1], 'size': comps[2], 'used': comps[3], 'priority': comps[4]} else: for line in __salt__['cmd.run_stdout']('swapctl -kl').splitlines(): if line.startswith(('Device', 'Total')): continue swap_type = "file" comps = line.split() if comps[0].startswith('/dev/'): swap_type = "partition" ret[comps[0]] = {'type': swap_type, 'size': comps[1], 'used': comps[2], 'priority': comps[5]} return ret def swapon(name, priority=None): ''' Activate a swap disk CLI Example: .. code-block:: bash salt '*' mount.swapon /root/swapfile ''' ret = {} on_ = swaps() if name in on_: ret['stats'] = on_[name] ret['new'] = False return ret cmd = 'swapon {0}'.format(name) if priority: cmd += ' -p {0}'.format(priority) __salt__['cmd.run'](cmd) on_ = swaps() if name in on_: ret['stats'] = on_[name] ret['new'] = True return ret return ret def swapoff(name): ''' Deactivate a named swap mount CLI Example: .. code-block:: bash salt '*' mount.swapoff /root/swapfile ''' on_ = swaps() if name in on_: if __grains__['os'] != 'OpenBSD': __salt__['cmd.run']('swapoff {0}'.format(name)) else: __salt__['cmd.run']('swapctl -d {0}'.format(name)) on_ = swaps() if name in on_: return False return True return None def is_mounted(name): ''' .. versionadded:: 2014.7.0 Provide information if the path is mounted CLI Example: .. code-block:: bash salt '*' mount.is_mounted /mnt/share ''' active_ = active() if name in active_: return True else: return False
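The docstrings above show the intended salt CLI invocations; the sketch below is a hypothetical direct call into the same module from Python. The import path, a contemporary Salt checkout, and the presence of an /etc/fstab are assumptions, and set_fstab is run with test=True so nothing is written:

# Hypothetical direct use of the fstab helpers, bypassing the salt CLI.
# fstab() and set_fstab() only read the config file (and only write when
# test is False), so they do not need a running minion.
from salt.modules import mount  # assumed import path

entries = mount.fstab(config='/etc/fstab')
for mount_point, entry in entries.items():
    print(mount_point, entry['device'], entry['fstype'], ','.join(entry['opts']))

# Report what would happen for a new entry without touching the file.
result = mount.set_fstab(
    name='/mnt/data',
    device='/dev/sdb1',
    fstype='ext4',
    opts='defaults,noatime',
    config='/etc/fstab',
    test=True,
)
print(result)  # 'new', 'present' or 'change'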
2.1875
2
base/admin.py
ExpertOfNone/expert_of_none
0
183
from django.contrib import admin

from base.models import Topic, Photo


class EONBaseAdmin(admin.ModelAdmin):

    def get_changeform_initial_data(self, request):
        initial = super().get_changeform_initial_data(request)
        if 'add' in request.META['PATH_INFO']:
            initial['created_by'] = request.user
            initial['modified_by'] = request.user
        return initial

    def save_model(self, request, obj, form, change):
        if not obj.created_by:
            obj.created_by = request.user
        return super().save_model(request, obj, form, change)


class TopicAdmin(EONBaseAdmin):

    list_display = [
        'name', 'parent_topic', 'top_level', 'modified_by', 'modified', 'created_by', 'created',
    ]


class PhotoAdmin(EONBaseAdmin):
    # TODO Add Proper List Display
    pass


admin.site.register(Topic, TopicAdmin)
admin.site.register(Photo, PhotoAdmin)
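A hypothetical example of reusing EONBaseAdmin for one more model, following the registration pattern above; the Tag model is an assumption and is not part of this snippet:

# Hypothetical: any other model with created_by/modified_by fields can reuse
# the same pre-fill behaviour by subclassing EONBaseAdmin.
from base.models import Tag  # assumed model


class TagAdmin(EONBaseAdmin):
    list_display = ['name', 'modified_by', 'modified', 'created_by', 'created']


admin.site.register(Tag, TagAdmin)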
1.929688
2
met/metadataparser/models/entity_type.py
z1digitalstudio/met
11
184
<filename>met/metadataparser/models/entity_type.py<gh_stars>10-100
#################################################################
# MET v2 Metadate Explorer Tool
#
# This Software is Open Source. See License: https://github.com/TERENA/met/blob/master/LICENSE.md
# Copyright (c) 2012, TERENA All rights reserved.
#
# This Software is based on MET v1 developed for TERENA by Yaco Sistemas, http://www.yaco.es/
# MET v2 was developed for TERENA by <NAME>, DAASI International GmbH, http://www.daasi.de
# Current version of MET has been revised for performance improvements by <NAME>,
# Consortium GARR, http://www.garr.it
##########################################################################

from django.db import models
from django.utils.translation import ugettext_lazy as _


class EntityType(models.Model):
    """
    Model describing the type of an entity.
    """

    name = models.CharField(blank=False, max_length=20, unique=True,
                            verbose_name=_(u'Name'), db_index=True)
    xmlname = models.CharField(blank=False, max_length=20, unique=True,
                               verbose_name=_(u'Name in XML'), db_index=True)

    def __unicode__(self):
        return self.name
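A hypothetical Django-shell style use of the EntityType model above; the example names are illustrative, and a configured met project with a migrated database is assumed:

# Hypothetical: create and query entity types through the standard ORM.
idp_type, created = EntityType.objects.get_or_create(
    name='IDP', xmlname='IDPSSODescriptor')
print(idp_type.name)  # 'IDP'
print(EntityType.objects.filter(xmlname__icontains='sso').count())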
1.882813
2
wxpy/bot.py
daimajia/wxpy
34
185
import traceback from pprint import pformat from threading import Thread import itchat import logging from wxpy.chat import Chat from wxpy.chats import Chats from wxpy.friend import Friend from wxpy.group import Group from wxpy.message import MessageConfigs, Messages, Message, MessageConfig from wxpy.mp import MP from wxpy.response import ResponseError from wxpy.user import User from wxpy.utils.constants import SYSTEM from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list logger = logging.getLogger('wxpy') class Robot(object): """ 机器人对象,用于登陆和操作微信账号,涵盖大部分 Web 微信的功能 """ def __init__( self, save_path=None, console_qr=False, qr_path=None, qr_callback=None, login_callback=None, logout_callback=None ): """ :param save_path: | 用于保存或载入登陆状态的文件路径,例如: 'wxpy.pkl',为空则不尝试载入。 | 填写本参数后,可在短时间内重新载入登陆状态,避免重复扫码,失效时会重新要求登陆 :param console_qr: 在终端中显示登陆二维码,需要安装 Pillow 模块 :param qr_path: 保存二维码的路径 :param qr_callback: 获得二维码时的回调,接收参数: uuid, status, qrcode :param login_callback: 登陆时的回调,接收参数同上 :param logout_callback: 登出时的回调,接收参数同上 """ self.core = itchat.Core() itchat.instanceList.append(self) self.core.auto_login( hotReload=bool(save_path), statusStorageDir=save_path, enableCmdQR=console_qr, picDir=qr_path, qrCallback=qr_callback, loginCallback=login_callback, exitCallback=logout_callback ) self.message_configs = MessageConfigs(self) self.messages = Messages(robot=self) self.file_helper = Chat(wrap_user_name('filehelper')) self.file_helper.robot = self self.file_helper.nick_name = '文件传输助手' self.self = Chat(self.core.loginInfo['User']) self.self.robot = self self.save_path = save_path def __repr__(self): return '<{}: {}>'.format(self.__class__.__name__, self.self.name) @handle_response() def logout(self): """ 登出当前账号 """ return self.core.logout() @property def alive(self): """ 当前的登陆状态 :return: 若为登陆状态,则为 True,否则为 False """ return self.core.alive @alive.setter def alive(self, value): self.core.alive = value def dump_login_status(self, save_path=None): return self.core.dump_login_status(save_path or self.save_path) # chats def except_self(self, chats_or_dicts): """ 从聊天对象合集或用户字典列表中排除自身 :param chats_or_dicts: 聊天对象合集或用户字典列表 :return: 排除自身后的列表 """ return list(filter(lambda x: get_user_name(x) != self.self.user_name, chats_or_dicts)) def chats(self, update=False): """ 获取所有聊天对象 :param update: 是否更新 :return: 聊天对象合集 """ return Chats(self.friends(update) + self.groups(update) + self.mps(update), self) def friends(self, update=False): """ 获取所有好友 :param update: 是否更新 :return: 聊天对象合集 """ @handle_response(Friend) def do(): return self.core.get_friends(update=update) ret = do() ret.source = self return ret @handle_response(Group) def groups(self, update=False, contact_only=False): """ 获取所有群聊 :param update: 是否更新 :param contact_only: 是否限于保存为联系人的群聊 :return: 群聊合集 """ return self.core.get_chatrooms(update=update, contactOnly=contact_only) @handle_response(MP) def mps(self, update=False): """ 获取所有公众号 :param update: 是否更新 :return: 聊天对象合集 """ return self.core.get_mps(update=update) @handle_response(User) def user_details(self, user_or_users, chunk_size=50): """ 获取单个或批量获取多个用户的详细信息(地区、性别、签名等),但不可用于群聊成员 :param user_or_users: 单个或多个用户对象或 user_name :param chunk_size: 分配请求时的单批数量,目前为 50 :return: 单个或多个用户用户的详细信息 """ def chunks(): total = ensure_list(user_or_users) for i in range(0, len(total), chunk_size): yield total[i:i + chunk_size] @handle_response() def process_one_chunk(_chunk): return self.core.update_friend(userName=get_user_name(_chunk)) if isinstance(user_or_users, (list, tuple)): ret = list() for chunk in chunks(): 
chunk_ret = process_one_chunk(chunk) if isinstance(chunk_ret, list): ret += chunk_ret else: ret.append(chunk_ret) return ret else: return process_one_chunk(user_or_users) def search(self, name=None, **attributes): """ 在所有类型的聊天对象中进行搜索 :param name: 名称 (可以是昵称、备注等) :param attributes: 属性键值对,键可以是 sex(性别), province(省份), city(城市) 等。例如可指定 province='广东' :return: 匹配的聊天对象合集 """ return self.chats().search(name, **attributes) # add / create @handle_response() def add_friend(self, user, verify_content=''): """ 添加用户为好友 :param user: 用户对象或用户名 :param verify_content: 验证说明信息 """ return self.core.add_friend( userName=get_user_name(user), status=2, verifyContent=verify_content, autoUpdate=True ) @handle_response() def accept_friend(self, user, verify_content=''): """ 接受用户为好友 :param user: 用户对象或用户名 :param verify_content: 验证说明信息 """ # Todo: 验证好友接口可用性,并在接受好友时直接返回新好友 return self.core.add_friend( userName=get_user_name(user), status=3, verifyContent=verify_content, autoUpdate=True ) def create_group(self, users, topic=None): """ 创建一个新的群聊 :param users: 用户列表 :param topic: 群名称 :return: 若建群成功,返回一个新的群聊对象 """ @handle_response() def request(): return self.core.create_chatroom( memberList=wrap_user_name(users), topic=topic or '' ) ret = request() user_name = ret.get('ChatRoomName') if user_name: return Group(self.core.update_chatroom(userName=user_name)) else: raise ResponseError('Failed to create group:\n{}'.format(pformat(ret))) # messages def _process_message(self, msg): """ 处理接收到的消息 """ if not self.alive: return func, run_async = self.message_configs.get_func(msg) if not func: return def process(): # noinspection PyBroadException try: ret = func(msg) if ret is not None: if isinstance(ret, (tuple, list)): self.core.send( msg=str(ret[0]), toUserName=msg.chat.user_name, mediaId=ret[1] ) else: self.core.send( msg=str(ret), toUserName=msg.chat.user_name ) except: logger.warning( 'An error occurred in registered function, ' 'use `Robot().start(debug=True)` to show detailed information') logger.debug(traceback.format_exc()) if run_async: Thread(target=process).start() else: process() def register( self, chats=None, msg_types=None, except_self=True, run_async=True, enabled=True ): """ 装饰器:用于注册消息配置 :param chats: 单个或列表形式的多个聊天对象或聊天类型,为空时匹配所有聊天对象 :param msg_types: 单个或列表形式的多个消息类型,为空时匹配所有消息类型 (SYSTEM 类消息除外) :param except_self: 排除自己在手机上发送的消息 :param run_async: 异步执行配置的函数,可提高响应速度 :param enabled: 当前配置的默认开启状态,可事后动态开启或关闭 """ def register(func): self.message_configs.append(MessageConfig( robot=self, func=func, chats=chats, msg_types=msg_types, except_self=except_self, run_async=run_async, enabled=enabled )) return func return register def start(self, block=True): """ 开始监听和处理消息 :param block: 是否堵塞线程,为 False 时将在新的线程中运行 """ def listen(): logger.info('{} Auto-reply started.'.format(self)) try: while self.alive: msg = Message(self.core.msgList.get(), self) if msg.type is not SYSTEM: self.messages.append(msg) self._process_message(msg) except KeyboardInterrupt: logger.info('KeyboardInterrupt received, ending...') self.alive = False if self.core.useHotReload: self.dump_login_status() logger.info('Bye.') if block: listen() else: t = Thread(target=listen, daemon=True) t.start()
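A hypothetical quick-start for the Robot class above, based on its docstrings; the import path, the msg.text attribute and the reply text are assumptions rather than things shown in this snippet:

# Hypothetical usage: log in by scanning the QR code, auto-reply to every
# non-system message, then block the main thread and keep listening.
from wxpy.bot import Robot  # assumed import path

robot = Robot(save_path='wxpy.pkl')  # cache the login state for quick re-login


@robot.register()  # no filters: match every chat and every message type
def auto_reply(msg):
    # whatever is returned here is sent back to the originating chat
    return 'Received: {}'.format(msg.text)  # msg.text is an assumed attribute


robot.start(block=True)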
1.90625
2
glue/__init__.py
HPLegion/glue
550
186
<reponame>HPLegion/glue<filename>glue/__init__.py
# Set up configuration variables

__all__ = ['custom_viewer', 'qglue', 'test']

import os
import sys

from pkg_resources import get_distribution, DistributionNotFound

try:
    __version__ = get_distribution('glue-core').version
except DistributionNotFound:
    __version__ = 'undefined'

from ._mpl_backend import MatplotlibBackendSetter
sys.meta_path.append(MatplotlibBackendSetter())

from glue.viewers.custom.helper import custom_viewer

# Load user's configuration file
from .config import load_configuration
env = load_configuration()

from .qglue import qglue

from .main import load_plugins  # noqa


def test(no_optional_skip=False):
    from pytest import main
    root = os.path.abspath(os.path.dirname(__file__))
    args = [root, '-x']
    if no_optional_skip:
        args.append('--no-optional-skip')
    return main(args=args)


from glue._settings_helpers import load_settings
load_settings()


# In PyQt 5.5+, PyQt overrides the default exception catching and fatally
# crashes the Qt application without printing out any details about the error.
# Below we revert the exception hook to the original Python one. Note that we
# can't just do sys.excepthook = sys.__excepthook__ otherwise PyQt will detect
# the default excepthook is in place and override it.

def handle_exception(exc_type, exc_value, exc_traceback):
    sys.__excepthook__(exc_type, exc_value, exc_traceback)

sys.excepthook = handle_exception
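A hypothetical quick-start using the three names this __init__ exports in __all__; it assumes a working glue-core installation with a Qt backend available:

# Hypothetical: inspect the version, then hand qglue a small in-memory dataset.
import numpy as np
import glue

print(glue.__version__)  # 'undefined' when run from a raw checkout

# qglue starts the Qt application with the given dataset loaded.
glue.qglue(catalog={'x': np.arange(10), 'y': np.arange(10) ** 2})

# The bundled test suite can also be run programmatically:
# glue.test(no_optional_skip=True)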
1.796875
2
run.py
pran01/AlgoVision
33
187
from algovision import app

if(__name__=="__main__"):
    app.run(debug=True,host='0.0.0.0')
1.351563
1
readthedocs/settings/proxito/base.py
rffontenelle/readthedocs.org
0
188
""" Base settings for Proxito Some of these settings will eventually be backported into the main settings file, but currently we have them to be able to run the site with the old middleware for a staged rollout of the proxito code. """ class CommunityProxitoSettingsMixin: ROOT_URLCONF = 'readthedocs.proxito.urls' USE_SUBDOMAIN = True SECURE_REFERRER_POLICY = "no-referrer-when-downgrade" # Allow cookies from cross-site requests on subdomains for now. # As 'Lax' breaks when the page is embedded in an iframe. SESSION_COOKIE_SAMESITE = None @property def DATABASES(self): # This keeps connections to the DB alive, # which reduces latency with connecting to postgres dbs = getattr(super(), 'DATABASES', {}) for db in dbs: dbs[db]['CONN_MAX_AGE'] = 86400 return dbs @property def MIDDLEWARE(self): # noqa # Use our new middleware instead of the old one classes = super().MIDDLEWARE classes = list(classes) classes.append('readthedocs.proxito.middleware.ProxitoMiddleware') middleware_to_remove = ( 'csp.middleware.CSPMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) for mw in middleware_to_remove: if mw in classes: classes.remove(mw) else: log.warning('Failed to remove middleware: %s', mw) return classes
1.960938
2
model_selection/tests/test_search.py
jessica-tu/jupyter
0
189
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert grid_search_no_score.best_params_ == grid_search.best_params_ # check that we can call score and that it gives the correct result assert grid_search.score(X, y) == grid_search_no_score.score(X, y) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc' ).fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = search_no_scoring.score(X, y) score_accuracy = search_accuracy.score(X, y) score_no_score_auc = search_no_score_method_auc.score(X, y) score_auc = search_auc.score(X, y) # ensure the test is sane assert score_auc < 1.0 assert score_accuracy < 1.0 assert score_auc != score_accuracy assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_grid_search_groups(): # Check if ValueError (when groups is None) propagates to GridSearchCV # And also check if groups is correctly passed to the cv object rng = np.random.RandomState(0) X, y = make_classification(n_samples=15, n_classes=2, random_state=0) groups = rng.randint(0, 3, 15) clf = LinearSVC(random_state=0) grid = {'C': [1]} group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(n_splits=3), GroupShuffleSplit()] for cv in group_cvs: gs = GridSearchCV(clf, grid, cv=cv) assert_raise_message(ValueError, "The 'groups' parameter should not be None.", gs.fit, X, y) gs.fit(X, y, groups=groups) non_group_cvs 
= [StratifiedKFold(), StratifiedShuffleSplit()] for cv in non_group_cvs: gs = GridSearchCV(clf, grid, cv=cv) # Should not raise an error gs.fit(X, y) def test_classes__property(): # Test that classes_ property matches best_estimator_.classes_ X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) Cs = [.1, 1, 10] grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) grid_search.fit(X, y) assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_) # Test that regressors do not have a classes_ attribute grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]}) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute before it's fit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute without a refit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}, refit=False) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') def test_trivial_cv_results_attr(): # Test search over a "grid" with only one point. clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3) grid_search.fit(X, y) assert hasattr(grid_search, "cv_results_") random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3) random_search.fit(X, y) assert hasattr(grid_search, "cv_results_") def test_no_refit(): # Test that GSCV can be used for model selection alone without refitting clf = MockClassifier() for scoring in [None, ['accuracy', 'precision']]: grid_search = GridSearchCV( clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3 ) grid_search.fit(X, y) assert not hasattr(grid_search, "best_estimator_") and \ hasattr(grid_search, "best_index_") and \ hasattr(grid_search, "best_params_") # Make sure the functions predict/transform etc raise meaningful # error messages for fn_name in ('predict', 'predict_proba', 'predict_log_proba', 'transform', 'inverse_transform'): assert_raise_message(NotFittedError, ('refit=False. 
%s is available only after ' 'refitting on the best parameters' % fn_name), getattr(grid_search, fn_name), X) # Test that an invalid refit param raises appropriate error messages for refit in ["", 5, True, 'recall', 'accuracy']: assert_raise_message(ValueError, "For multi-metric scoring, the " "parameter refit must be set to a scorer key", GridSearchCV(clf, {}, refit=refit, scoring={'acc': 'accuracy', 'prec': 'precision'} ).fit, X, y) def test_grid_search_error(): # Test that grid search will capture errors on data with different length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC(gamma='auto') cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_when_param_grid_includes_range(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = None grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3) grid_search.fit(X, y) assert grid_search.best_estimator_.foo_param == 2 def test_grid_search_bad_param_grid(): param_dict = {"C": 1} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'int'>)." " Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raise_message( ValueError, "Parameter values for parameter (C) need to be a non-empty sequence.", GridSearchCV, clf, param_dict) param_dict = {"C": "1,2,3"} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'str'>)." 
" Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": np.ones((3, 2))} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert np.mean(y_pred == y_pred2) >= .9 assert C == C2 def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert C == C2 # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert C == C3 assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert cv.best_score_ >= 0 # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert np.mean(y_pred == y_test) >= 0 # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10, )) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert not hasattr(self, 'has_been_fit_') self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) @ignore_warnings def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. 
X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}], scoring="precision", refit=True) clf.fit(X, y) def test_refit_callable(): """ Test refit=callable, which adds flexibility in identifying the "best" estimator. """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_score`. """ # Fit a dummy clf with `refit=True` to get a list of keys in # clf.cv_results_. X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=True) clf.fit(X, y) # Ensure that `best_index_ != 0` for this dummy clf assert clf.best_index_ != 0 # Assert every key matches those in `cv_results` for key in clf.cv_results_.keys(): assert key in cv_results return cv_results['mean_test_score'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_refit_callable_invalid_type(): """ Test implementation catches the errors when 'best_index_' returns an invalid result. """ def refit_callable_invalid_type(cv_results): """ A dummy function tests when returned 'best_index_' is not integer. """ return None X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_invalid_type) with pytest.raises(TypeError, match='best_index_ returned is not an integer'): clf.fit(X, y) @pytest.mark.parametrize('out_bound_value', [-1, 2]) @pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV]) def test_refit_callable_out_bound(out_bound_value, search_cv): """ Test implementation catches the errors when 'best_index_' returns an out of bound result. """ def refit_callable_out_bound(cv_results): """ A dummy function tests when returned 'best_index_' is out of bounds. """ return out_bound_value X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_out_bound) with pytest.raises(IndexError, match='best_index_ index out of range'): clf.fit(X, y) def test_refit_callable_multi_metric(): """ Test refit=callable in multiple metric evaluation setting """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_prec`. 
""" assert 'mean_test_prec' in cv_results return cv_results['mean_test_prec'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'} clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring=scoring, refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_gridsearch_nd(): # Pass X as list in GridSearchCV X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) check_X = lambda x: x.shape[1:] == (5, 3, 2) check_y = lambda x: x.shape[1:] == (7, 11) clf = CheckingClassifier( check_X=check_X, check_y=check_y, methods_to_check=["fit"], ) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) assert hasattr(grid_search, "cv_results_") def test_X_as_list(): # Pass X as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_X=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) assert hasattr(grid_search, "cv_results_") def test_y_as_list(): # Pass y as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_y=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) assert hasattr(grid_search, "cv_results_") @ignore_warnings def test_pandas_input(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((DataFrame, Series)) except ImportError: pass X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) for InputFeatureType, TargetType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) def check_df(x): return isinstance(x, InputFeatureType) def check_series(x): return isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) assert hasattr(grid_search, "cv_results_") def test_unsupervised_grid_search(): # test grid-search with unsupervised estimator X, y = make_blobs(n_samples=50, random_state=0) km = KMeans(random_state=0, init="random", n_init=1) # Multi-metric evaluation unsupervised scoring = ['adjusted_rand_score', 'fowlkes_mallows_score'] for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']: grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit) grid_search.fit(X, y) # Both ARI and FMS can find the right number :) assert grid_search.best_params_["n_clusters"] == 3 # Single metric evaluation unsupervised grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring='fowlkes_mallows_score') grid_search.fit(X, y) assert grid_search.best_params_["n_clusters"] == 3 # Now without a score, and without y grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) grid_search.fit(X) assert grid_search.best_params_["n_clusters"] == 4 def test_gridsearch_no_predict(): # test grid-search with an estimator 
without predict. # slight duplication of a test from KDE def custom_scoring(estimator, X): return 42 if estimator.bandwidth == .1 else 0 X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) search = GridSearchCV(KernelDensity(), param_grid=dict(bandwidth=[.01, .1, 1]), scoring=custom_scoring) search.fit(X) assert search.best_params_['bandwidth'] == .1 assert search.best_score_ == 42 def test_param_sampler(): # test basic properties of param sampler param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) samples = [x for x in sampler] assert len(samples) == 10 for sample in samples: assert sample["kernel"] in ["rbf", "linear"] assert 0 <= sample["C"] <= 1 # test that repeated calls yield identical parameters param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=3, random_state=0) assert [x for x in sampler] == [x for x in sampler] if sp_version >= (0, 16): param_distributions = {"C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) assert [x for x in sampler] == [x for x in sampler] def check_cv_results_array_types(search, param_keys, score_keys): # Check if the search `cv_results`'s array are of correct types cv_results = search.cv_results_ assert all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys) assert all(cv_results[key].dtype == object for key in param_keys) assert not any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys) assert all(cv_results[key].dtype == np.float64 for key in score_keys if not key.startswith('rank')) scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score'] for key in scorer_keys: assert cv_results['rank_test_%s' % key].dtype == np.int32 def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand): # Test the search.cv_results_ contains all the required results assert_array_equal(sorted(cv_results.keys()), sorted(param_keys + score_keys + ('params',))) assert all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys) def test_grid_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_grid_points = 6 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_candidates = n_grid_points search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check if score and timing are reasonable assert all(cv_results['rank_test_score'] >= 1) assert (all(cv_results[k] >= 0) for k in score_keys if k != 'rank_test_score') assert (all(cv_results[k] <= 1) for k in score_keys if 'time' not in k and k != 'rank_test_score') # Check cv_results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) # Check masking cv_results = search.cv_results_ n_candidates = 
len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') def test_random_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 params = [{'kernel': ['rbf'], 'C': expon(scale=10), 'gamma': expon(scale=0.1)}, {'kernel': ['poly'], 'degree': [2, 3]}] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_cand = n_search_iter search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_splits, param_distributions=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_cand) n_candidates = len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') @pytest.mark.parametrize( "SearchCV, specialized_params", [(GridSearchCV, {'param_grid': {'C': [1, 10]}}), (RandomizedSearchCV, {'param_distributions': {'C': [1, 10]}, 'n_iter': 2})] ) def test_search_default_iid(SearchCV, specialized_params): # Test the IID parameter TODO: Clearly this test does something else??? # noise-free simple 2d-data X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0, cluster_std=0.1, shuffle=False, n_samples=80) # split dataset into two folds that are not iid # first one contains data of all 4 blobs, second only from two. mask = np.ones(X.shape[0], dtype=np.bool) mask[np.where(y == 1)[0][::2]] = 0 mask[np.where(y == 2)[0][::2]] = 0 # this leads to perfect classification on one fold and a score of 1/3 on # the other # create "cv" for splits cv = [[mask, ~mask], [~mask, mask]] common_params = {'estimator': SVC(), 'cv': cv, 'return_train_score': True} search = SearchCV(**common_params, **specialized_params) search.fit(X, y) test_cv_scores = np.array( [search.cv_results_['split%d_test_score' % s][0] for s in range(search.n_splits_)] ) test_mean = search.cv_results_['mean_test_score'][0] test_std = search.cv_results_['std_test_score'][0] train_cv_scores = np.array( [search.cv_results_['split%d_train_score' % s][0] for s in range(search.n_splits_)] ) train_mean = search.cv_results_['mean_train_score'][0] train_std = search.cv_results_['std_train_score'][0] assert search.cv_results_['param_C'][0] == 1 # scores are the same as above assert_allclose(test_cv_scores, [1, 1. 
/ 3.]) assert_allclose(train_cv_scores, [1, 1]) # Unweighted mean/std is used assert test_mean == pytest.approx(np.mean(test_cv_scores)) assert test_std == pytest.approx(np.std(test_cv_scores)) # For the train scores, we do not take a weighted mean irrespective of # i.i.d. or not assert train_mean == pytest.approx(1) assert train_std == pytest.approx(0) def test_grid_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] grid_searches = [] for scoring in ({'accuracy': make_scorer(accuracy_score), 'recall': make_scorer(recall_score)}, 'accuracy', 'recall'): grid_search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, scoring=scoring, refit=False) grid_search.fit(X, y) grid_searches.append(grid_search) compare_cv_results_multimetric_with_single(*grid_searches) def test_random_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 # Scipy 0.12's stats dists do not accept seed, hence we use param grid params = dict(C=np.logspace(-4, 1, 3), gamma=np.logspace(-5, 0, 3, base=0.1)) for refit in (True, False): random_searches = [] for scoring in (('accuracy', 'recall'), 'accuracy', 'recall'): # If True, for multi-metric pass refit='accuracy' if refit: probability = True refit = 'accuracy' if isinstance(scoring, tuple) else refit else: probability = False clf = SVC(probability=probability, random_state=42) random_search = RandomizedSearchCV(clf, n_iter=n_search_iter, cv=n_splits, param_distributions=params, scoring=scoring, refit=refit, random_state=0) random_search.fit(X, y) random_searches.append(random_search) compare_cv_results_multimetric_with_single(*random_searches) compare_refit_methods_when_refit_with_acc( random_searches[0], random_searches[1], refit) def compare_cv_results_multimetric_with_single( search_multi, search_acc, search_rec): """Compare multi-metric cv_results with the ensemble of multiple single metric cv_results from single metric grid/random search""" assert search_multi.multimetric_ assert_array_equal(sorted(search_multi.scorer_), ('accuracy', 'recall')) cv_results_multi = search_multi.cv_results_ cv_results_acc_rec = {re.sub('_score$', '_accuracy', k): v for k, v in search_acc.cv_results_.items()} cv_results_acc_rec.update({re.sub('_score$', '_recall', k): v for k, v in search_rec.cv_results_.items()}) # Check if score and timing are reasonable, also checks if the keys # are present assert all((np.all(cv_results_multi[k] <= 1) for k in ( 'mean_score_time', 'std_score_time', 'mean_fit_time', 'std_fit_time'))) # Compare the keys, other than time keys, among multi-metric and # single metric grid search results. 
np.testing.assert_equal performs a # deep nested comparison of the two cv_results dicts np.testing.assert_equal({k: v for k, v in cv_results_multi.items() if not k.endswith('_time')}, {k: v for k, v in cv_results_acc_rec.items() if not k.endswith('_time')}) def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit): """Compare refit multi-metric search methods with single metric methods""" assert search_acc.refit == refit if refit: assert search_multi.refit == 'accuracy' else: assert not search_multi.refit return # search cannot predict/score without refit X, y = make_blobs(n_samples=100, n_features=4, random_state=42) for method in ('predict', 'predict_proba', 'predict_log_proba'): assert_almost_equal(getattr(search_multi, method)(X), getattr(search_acc, method)(X)) assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y)) for key in ('best_index_', 'best_score_', 'best_params_'): assert getattr(search_multi, key) == getattr(search_acc, key) def test_search_cv_results_rank_tie_breaking(): X, y = make_blobs(n_samples=50, random_state=42) # The two C values are close enough to give similar models # which would result in a tie of their mean cv-scores param_grid = {'C': [1, 1.001, 0.001]} grid_search = GridSearchCV(SVC(), param_grid=param_grid, return_train_score=True) random_search = RandomizedSearchCV(SVC(), n_iter=3, param_distributions=param_grid, return_train_score=True) for search in (grid_search, random_search): search.fit(X, y) cv_results = search.cv_results_ # Check tie breaking strategy - # Check that there is a tie in the mean scores between # candidates 1 and 2 alone assert_almost_equal(cv_results['mean_test_score'][0], cv_results['mean_test_score'][1]) assert_almost_equal(cv_results['mean_train_score'][0], cv_results['mean_train_score'][1]) assert not np.allclose(cv_results['mean_test_score'][1], cv_results['mean_test_score'][2]) assert not np.allclose(cv_results['mean_train_score'][1], cv_results['mean_train_score'][2]) # 'min' rank should be assigned to the tied candidates assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3]) def test_search_cv_results_none_param(): X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1] estimators = (DecisionTreeRegressor(), DecisionTreeClassifier()) est_parameters = {"random_state": [0, None]} cv = KFold() for est in estimators: grid_search = GridSearchCV(est, est_parameters, cv=cv, ).fit(X, y) assert_array_equal(grid_search.cv_results_['param_random_state'], [0, None]) @ignore_warnings() def test_search_cv_timing(): svc = LinearSVC(random_state=0) X = [[1, ], [2, ], [3, ], [4, ]] y = [0, 1, 1, 0] gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0) rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2) for search in (gs, rs): search.fit(X, y) for key in ['mean_fit_time', 'std_fit_time']: # NOTE The precision of time.time in windows is not high # enough for the fit/score times to be non-zero for trivial X and y assert np.all(search.cv_results_[key] >= 0) assert np.all(search.cv_results_[key] < 1) for key in ['mean_score_time', 'std_score_time']: assert search.cv_results_[key][1] >= 0 assert search.cv_results_[key][0] == 0.0 assert np.all(search.cv_results_[key] < 1) assert hasattr(search, "refit_time_") assert isinstance(search.refit_time_, float) assert search.refit_time_ >= 0 def test_grid_search_correct_score_results(): # test that correct scores are used n_splits = 3 clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] for score in 
['f1', 'roc_auc']: grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits) cv_results = grid_search.fit(X, y).cv_results_ # Test scorer names result_keys = list(cv_results.keys()) expected_keys = (("mean_test_score", "rank_test_score") + tuple("split%d_test_score" % cv_i for cv_i in range(n_splits))) assert all(np.in1d(expected_keys, result_keys)) cv = StratifiedKFold(n_splits=n_splits) n_splits = grid_search.n_splits_ for candidate_i, C in enumerate(Cs): clf.set_params(C=C) cv_scores = np.array( list(grid_search.cv_results_['split%d_test_score' % s][candidate_i] for s in range(n_splits))) for i, (train, test) in enumerate(cv.split(X, y)): clf.fit(X[train], y[train]) if score == "f1": correct_score = f1_score(y[test], clf.predict(X[test])) elif score == "roc_auc": dec = clf.decision_function(X[test]) correct_score = roc_auc_score(y[test], dec) assert_almost_equal(correct_score, cv_scores[i]) # FIXME remove test_fit_grid_point as the function will be removed on 0.25 @ignore_warnings(category=FutureWarning) def test_fit_grid_point(): X, y = make_classification(random_state=0) cv = StratifiedKFold() svc = LinearSVC(random_state=0) scorer = make_scorer(accuracy_score) for params in ({'C': 0.1}, {'C': 0.01}, {'C': 0.001}): for train, test in cv.split(X, y): this_scores, this_params, n_test_samples = fit_grid_point( X, y, clone(svc), params, train, test, scorer, verbose=False) est = clone(svc).set_params(**params) est.fit(X[train], y[train]) expected_score = scorer(est, X[test], y[test]) # Test the return values of fit_grid_point assert_almost_equal(this_scores, expected_score) assert params == this_params assert n_test_samples == test.size # Should raise an error upon multimetric scorer assert_raise_message(ValueError, "For evaluating multiple scores, use " "sklearn.model_selection.cross_validate instead.", fit_grid_point, X, y, svc, params, train, test, {'score': scorer}, verbose=True) # FIXME remove test_fit_grid_point_deprecated as # fit_grid_point will be removed on 0.25 def test_fit_grid_point_deprecated(): X, y = make_classification(random_state=0) svc = LinearSVC(random_state=0) scorer = make_scorer(accuracy_score) msg = ("fit_grid_point is deprecated in version 0.23 " "and will be removed in version 0.25") params = {'C': 0.1} train, test = next(StratifiedKFold().split(X, y)) with pytest.warns(FutureWarning, match=msg): fit_grid_point(X, y, svc, params, train, test, scorer, verbose=False) def test_pickle(): # Test that a fit search can be pickled clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True, cv=3) grid_search.fit(X, y) grid_search_pickled = pickle.loads(pickle.dumps(grid_search)) assert_array_almost_equal(grid_search.predict(X), grid_search_pickled.predict(X)) random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True, n_iter=3, cv=3) random_search.fit(X, y) random_search_pickled = pickle.loads(pickle.dumps(random_search)) assert_array_almost_equal(random_search.predict(X), random_search_pickled.predict(X)) def test_grid_search_with_multioutput_data(): # Test search with multi-output estimator X, y = make_multilabel_classification(return_indicator=True, random_state=0) est_parameters = {"max_depth": [1, 2, 3, 4]} cv = KFold() estimators = [DecisionTreeRegressor(random_state=0), DecisionTreeClassifier(random_state=0)] # Test with grid search cv for est in estimators: grid_search = GridSearchCV(est, est_parameters, cv=cv) grid_search.fit(X, y) res_params = grid_search.cv_results_['params'] for cand_i in 
range(len(res_params)): est.set_params(**res_params[cand_i]) for i, (train, test) in enumerate(cv.split(X, y)): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal( correct_score, grid_search.cv_results_['split%d_test_score' % i][cand_i]) # Test with a randomized search for est in estimators: random_search = RandomizedSearchCV(est, est_parameters, cv=cv, n_iter=3) random_search.fit(X, y) res_params = random_search.cv_results_['params'] for cand_i in range(len(res_params)): est.set_params(**res_params[cand_i]) for i, (train, test) in enumerate(cv.split(X, y)): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal( correct_score, random_search.cv_results_['split%d_test_score' % i][cand_i]) def test_predict_proba_disabled(): # Test predict_proba when disabled on estimator. X = np.arange(20).reshape(5, -1) y = [0, 0, 1, 1, 1] clf = SVC(probability=False) gs = GridSearchCV(clf, {}, cv=2).fit(X, y) assert not hasattr(gs, "predict_proba") def test_grid_search_allows_nans(): # Test GridSearchCV with SimpleImputer X = np.arange(20, dtype=np.float64).reshape(5, -1) X[2, :] = np.nan y = [0, 0, 1, 1, 1] p = Pipeline([ ('imputer', SimpleImputer(strategy='mean', missing_values=np.nan)), ('classifier', MockClassifier()), ]) GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y) class FailingClassifier(BaseEstimator): """Classifier that raises a ValueError on fit()""" FAILING_PARAMETER = 2 def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y=None): if self.parameter == FailingClassifier.FAILING_PARAMETER: raise ValueError("Failing classifier failed as required") def predict(self, X): return np.zeros(X.shape[0]) def score(self, X=None, Y=None): return 0. def test_grid_search_failing_classifier(): # GridSearchCV with on_error != 'raise' # Ensures that a warning is raised and score reset where appropriate. X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we only want to check that errors caused by fits # to individual folds will be caught and warnings raised instead. If # refit was done, then an exception would be raised on refit and not # caught by grid_search (expected behavior), and this would cause an # error in this test. gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=0.0) assert_warns(FitFailedWarning, gs.fit, X, y) n_candidates = len(gs.cv_results_['params']) # Ensure that grid scores were set to zero as required for those fits # that are expected to fail. 
def get_cand_scores(i): return np.array(list(gs.cv_results_['split%d_test_score' % s][i] for s in range(gs.n_splits_))) assert all((np.all(get_cand_scores(cand_i) == 0.0) for cand_i in range(n_candidates) if gs.cv_results_['param_parameter'][cand_i] == FailingClassifier.FAILING_PARAMETER)) gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=float('nan')) assert_warns(FitFailedWarning, gs.fit, X, y) n_candidates = len(gs.cv_results_['params']) assert all(np.all(np.isnan(get_cand_scores(cand_i))) for cand_i in range(n_candidates) if gs.cv_results_['param_parameter'][cand_i] == FailingClassifier.FAILING_PARAMETER) ranks = gs.cv_results_['rank_test_score'] # Check that succeeded estimators have lower ranks assert ranks[0] <= 2 and ranks[1] <= 2 # Check that failed estimator has the highest rank assert ranks[clf.FAILING_PARAMETER] == 3 assert gs.best_index_ != clf.FAILING_PARAMETER def test_grid_search_failing_classifier_raise(): # GridSearchCV with on_error == 'raise' raises the error X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we want to test the behaviour of the grid search part gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score='raise') # FailingClassifier issues a ValueError so this is what we look for. assert_raises(ValueError, gs.fit, X, y) def test_parameters_sampler_replacement(): # raise warning if n_iter is bigger than total parameter space params = [{'first': [0, 1], 'second': ['a', 'b', 'c']}, {'third': ['two', 'values']}] sampler = ParameterSampler(params, n_iter=9) n_iter = 9 grid_size = 8 expected_warning = ('The total space of parameters %d is smaller ' 'than n_iter=%d. Running %d iterations. For ' 'exhaustive searches, use GridSearchCV.' % (grid_size, n_iter, grid_size)) assert_warns_message(UserWarning, expected_warning, list, sampler) # degenerates to GridSearchCV if n_iter the same as grid_size sampler = ParameterSampler(params, n_iter=8) samples = list(sampler) assert len(samples) == 8 for values in ParameterGrid(params): assert values in samples # test sampling without replacement in a large grid params = {'a': range(10), 'b': range(10), 'c': range(10)} sampler = ParameterSampler(params, n_iter=99, random_state=42) samples = list(sampler) assert len(samples) == 99 hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c']) for p in samples] assert len(set(hashable_samples)) == 99 # doesn't go into infinite loops params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']} sampler = ParameterSampler(params_distribution, n_iter=7) samples = list(sampler) assert len(samples) == 7 def test_stochastic_gradient_loss_param(): # Make sure the predict_proba works when loss is specified # as one of the parameters in the param_grid. param_grid = { 'loss': ['log'], } X = np.arange(24).reshape(6, -1) y = [0, 0, 0, 1, 1, 1] clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'), param_grid=param_grid, cv=3) # When the estimator is not fitted, `predict_proba` is not available as the # loss is 'hinge'. 
assert not hasattr(clf, "predict_proba") clf.fit(X, y) clf.predict_proba(X) clf.predict_log_proba(X) # Make sure `predict_proba` is not available when setting loss=['hinge'] # in param_grid param_grid = { 'loss': ['hinge'], } clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'), param_grid=param_grid, cv=3) assert not hasattr(clf, "predict_proba") clf.fit(X, y) assert not hasattr(clf, "predict_proba") def test_search_train_scores_set_to_false(): X = np.arange(6).reshape(6, -1) y = [0, 0, 0, 1, 1, 1] clf = LinearSVC(random_state=0) gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]}, cv=3) gs.fit(X, y) def test_grid_search_cv_splits_consistency(): # Check if a one time iterable is accepted as a cv parameter. n_samples = 100 n_splits = 5 X, y = make_classification(n_samples=n_samples, random_state=0) gs = GridSearchCV(LinearSVC(random_state=0), param_grid={'C': [0.1, 0.2, 0.3]}, cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples), return_train_score=True) gs.fit(X, y) gs2 = GridSearchCV(LinearSVC(random_state=0), param_grid={'C': [0.1, 0.2, 0.3]}, cv=KFold(n_splits=n_splits), return_train_score=True) gs2.fit(X, y) # Give generator as a cv parameter assert isinstance(KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y), GeneratorType) gs3 = GridSearchCV(LinearSVC(random_state=0), param_grid={'C': [0.1, 0.2, 0.3]}, cv=KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y), return_train_score=True) gs3.fit(X, y) gs4 = GridSearchCV(LinearSVC(random_state=0), param_grid={'C': [0.1, 0.2, 0.3]}, cv=KFold(n_splits=n_splits, shuffle=True, random_state=0), return_train_score=True) gs4.fit(X, y) def _pop_time_keys(cv_results): for key in ('mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time'): cv_results.pop(key) return cv_results # Check if generators are supported as cv and # that the splits are consistent np.testing.assert_equal(_pop_time_keys(gs3.cv_results_), _pop_time_keys(gs4.cv_results_)) # OneTimeSplitter is a non-re-entrant cv where split can be called only # once if ``cv.split`` is called once per param setting in GridSearchCV.fit # the 2nd and 3rd parameter will not be evaluated as no train/test indices # will be generated for the 2nd and subsequent cv.split calls. # This is a check to make sure cv.split is not called once per param # setting. 
np.testing.assert_equal({k: v for k, v in gs.cv_results_.items() if not k.endswith('_time')}, {k: v for k, v in gs2.cv_results_.items() if not k.endswith('_time')}) # Check consistency of folds across the parameters gs = GridSearchCV(LinearSVC(random_state=0), param_grid={'C': [0.1, 0.1, 0.2, 0.2]}, cv=KFold(n_splits=n_splits, shuffle=True), return_train_score=True) gs.fit(X, y) # As the first two param settings (C=0.1) and the next two param # settings (C=0.2) are same, the test and train scores must also be # same as long as the same train/test indices are generated for all # the cv splits, for both param setting for score_type in ('train', 'test'): per_param_scores = {} for param_i in range(4): per_param_scores[param_i] = list( gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i] for s in range(5)) assert_array_almost_equal(per_param_scores[0], per_param_scores[1]) assert_array_almost_equal(per_param_scores[2], per_param_scores[3]) def test_transform_inverse_transform_round_trip(): clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) grid_search.fit(X, y) X_round_trip = grid_search.inverse_transform(grid_search.transform(X)) assert_array_equal(X, X_round_trip) def test_custom_run_search(): def check_results(results, gscv): exp_results = gscv.cv_results_ assert sorted(results.keys()) == sorted(exp_results) for k in results: if not k.endswith('_time'): # XXX: results['params'] is a list :| results[k] = np.asanyarray(results[k]) if results[k].dtype.kind == 'O': assert_array_equal(exp_results[k], results[k], err_msg='Checking ' + k) else: assert_allclose(exp_results[k], results[k], err_msg='Checking ' + k) def fit_grid(param_grid): return GridSearchCV(clf, param_grid, return_train_score=True).fit(X, y) class CustomSearchCV(BaseSearchCV): def __init__(self, estimator, **kwargs): super().__init__(estimator, **kwargs) def _run_search(self, evaluate): results = evaluate([{'max_depth': 1}, {'max_depth': 2}]) check_results(results, fit_grid({'max_depth': [1, 2]})) results = evaluate([{'min_samples_split': 5}, {'min_samples_split': 10}]) check_results(results, fit_grid([{'max_depth': [1, 2]}, {'min_samples_split': [5, 10]}])) # Using regressor to make sure each score differs clf = DecisionTreeRegressor(random_state=0) X, y = make_classification(n_samples=100, n_informative=4, random_state=0) mycv = CustomSearchCV(clf, return_train_score=True).fit(X, y) gscv = fit_grid([{'max_depth': [1, 2]}, {'min_samples_split': [5, 10]}]) results = mycv.cv_results_ check_results(results, gscv) for attr in dir(gscv): if (attr[0].islower() and attr[-1:] == '_' and attr not in {'cv_results_', 'best_estimator_', 'refit_time_', 'classes_'}): assert getattr(gscv, attr) == getattr(mycv, attr), \ "Attribute %s not equal" % attr def test__custom_fit_no_run_search(): class NoRunSearchSearchCV(BaseSearchCV): def __init__(self, estimator, **kwargs): super().__init__(estimator, **kwargs) def fit(self, X, y=None, groups=None, **fit_params): return self # this should not raise any exceptions NoRunSearchSearchCV(SVC()).fit(X, y) class BadSearchCV(BaseSearchCV): def __init__(self, estimator, **kwargs): super().__init__(estimator, **kwargs) with pytest.raises(NotImplementedError, match="_run_search not implemented."): # this should raise a NotImplementedError BadSearchCV(SVC()).fit(X, y) def test_empty_cv_iterator_error(): # Use global X, y # create cv cv = KFold(n_splits=3).split(X) # pop all of it, this should cause the expected ValueError [u for u in cv] # cv is empty now 
train_size = 100 ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4) # assert that this raises an error with pytest.raises(ValueError, match='No fits were performed. ' 'Was the CV iterator empty\\? ' 'Were there no candidates\\?'): ridge.fit(X[:train_size], y[:train_size]) def test_random_search_bad_cv(): # Use global X, y class BrokenKFold(KFold): def get_n_splits(self, *args, **kw): return 1 # create bad cv cv = BrokenKFold(n_splits=3) train_size = 100 ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4) # assert that this raises an error with pytest.raises(ValueError, match='cv.split and cv.get_n_splits returned ' 'inconsistent results. Expected \\d+ ' 'splits, got \\d+'): ridge.fit(X[:train_size], y[:train_size]) def test_n_features_in(): # make sure grid search and random search delegate n_features_in to the # best estimator n_features = 4 X, y = make_classification(n_features=n_features) gbdt = HistGradientBoostingClassifier() param_grid = {'max_iter': [3, 4]} gs = GridSearchCV(gbdt, param_grid) rs = RandomizedSearchCV(gbdt, param_grid, n_iter=1) assert not hasattr(gs, 'n_features_in_') assert not hasattr(rs, 'n_features_in_') gs.fit(X, y) rs.fit(X, y) assert gs.n_features_in_ == n_features assert rs.n_features_in_ == n_features def test_search_cv__pairwise_property_delegated_to_base_estimator(): """ Test implementation of BaseSearchCV has the _pairwise property which matches the _pairwise property of its estimator. This test make sure _pairwise is delegated to the base estimator. Non-regression test for issue #13920. """ est = BaseEstimator() attr_message = "BaseSearchCV _pairwise property must match estimator" for _pairwise_setting in [True, False]: setattr(est, '_pairwise', _pairwise_setting) cv = GridSearchCV(est, {'n_neighbors': [10]}) assert _pairwise_setting == cv._pairwise, attr_message def test_search_cv__pairwise_property_equivalence_of_precomputed(): """ Test implementation of BaseSearchCV has the _pairwise property which matches the _pairwise property of its estimator. This test ensures the equivalence of 'precomputed'. Non-regression test for issue #13920. 
""" n_samples = 50 n_splits = 2 X, y = make_classification(n_samples=n_samples, random_state=0) grid_params = {'n_neighbors': [10]} # defaults to euclidean metric (minkowski p = 2) clf = KNeighborsClassifier() cv = GridSearchCV(clf, grid_params, cv=n_splits) cv.fit(X, y) preds_original = cv.predict(X) # precompute euclidean metric to validate _pairwise is working X_precomputed = euclidean_distances(X) clf = KNeighborsClassifier(metric='precomputed') cv = GridSearchCV(clf, grid_params, cv=n_splits) cv.fit(X_precomputed, y) preds_precomputed = cv.predict(X_precomputed) attr_message = "GridSearchCV not identical with precomputed metric" assert (preds_original == preds_precomputed).all(), attr_message @pytest.mark.parametrize( "SearchCV, param_search", [(GridSearchCV, {'a': [0.1, 0.01]}), (RandomizedSearchCV, {'a': uniform(1, 3)})] ) def test_scalar_fit_param(SearchCV, param_search): # unofficially sanctioned tolerance for scalar values in fit_params # non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/15805 class TestEstimator(BaseEstimator, ClassifierMixin): def __init__(self, a=None): self.a = a def fit(self, X, y, r=None): self.r_ = r def predict(self, X): return np.zeros(shape=(len(X))) model = SearchCV(TestEstimator(), param_search) X, y = make_classification(random_state=42) model.fit(X, y, r=42) assert model.best_estimator_.r_ == 42 @pytest.mark.parametrize( "SearchCV, param_search", [(GridSearchCV, {'alpha': [0.1, 0.01]}), (RandomizedSearchCV, {'alpha': uniform(0.01, 0.1)})] ) def test_scalar_fit_param_compat(SearchCV, param_search): # check support for scalar values in fit_params, for instance in LightGBM # that do not exactly respect the scikit-learn API contract but that we do # not want to break without an explicit deprecation cycle and API # recommendations for implementing early stopping with a user provided # validation set. non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/15805 X_train, X_valid, y_train, y_valid = train_test_split( *make_classification(random_state=42), random_state=42 ) class _FitParamClassifier(SGDClassifier): def fit(self, X, y, sample_weight=None, tuple_of_arrays=None, scalar_param=None, callable_param=None): super().fit(X, y, sample_weight=sample_weight) assert scalar_param > 0 assert callable(callable_param) # The tuple of arrays should be preserved as tuple. assert isinstance(tuple_of_arrays, tuple) assert tuple_of_arrays[0].ndim == 2 assert tuple_of_arrays[1].ndim == 1 return self def _fit_param_callable(): pass model = SearchCV( _FitParamClassifier(), param_search ) # NOTE: `fit_params` should be data dependent (e.g. `sample_weight`) which # is not the case for the following parameters. But this abuse is common in # popular third-party libraries and we should tolerate this behavior for # now and be careful not to break support for those without following # proper deprecation cycle. fit_params = { 'tuple_of_arrays': (X_valid, y_valid), 'callable_param': _fit_param_callable, 'scalar_param': 42, } model.fit(X_train, y_train, **fit_params)
2.0625
2
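The record above reproduces part of scikit-learn's model-selection test suite. As a quick orientation, here is a minimal, hedged sketch of the multi-metric refit contract that several of those tests exercise (the dataset, parameter grid, and scorer key names below are illustrative choices, not values taken from the tests):

```python
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC

# Small binary-classification problem for illustration.
X, y = make_classification(n_samples=100, n_features=4, random_state=42)

# With multi-metric scoring, `refit` must name one of the scoring keys
# (or be a callable / False); the fitted object then exposes per-metric
# columns such as `mean_test_acc` in cv_results_.
search = GridSearchCV(
    LinearSVC(random_state=42),
    param_grid={'C': [0.01, 0.1, 1]},
    scoring={'acc': 'accuracy', 'prec': 'precision'},
    refit='acc',
    cv=3,
)
search.fit(X, y)
print(search.best_params_)
print(search.cv_results_['mean_test_acc'])
print(search.cv_results_['mean_test_prec'])
```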
src/economy/migrations/0027_zettlebalance_zettlereceipt.py
bornhack/bornhack-website
7
190
<reponame>bornhack/bornhack-website # Generated by Django 3.2.7 on 2021-09-13 03:52 import uuid from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("economy", "0026_alter_clearhaussettlement_options"), ] operations = [ migrations.CreateModel( name="ZettleBalance", fields=[ ( "uuid", models.UUIDField( default=uuid.uuid4, editable=False, primary_key=True, serialize=False, ), ), ("created", models.DateTimeField(auto_now_add=True)), ("updated", models.DateTimeField(auto_now=True)), ( "statement_time", models.DateTimeField( help_text="The date and time this movement was added to the account statement." ), ), ( "payment_time", models.DateTimeField( blank=True, help_text="The date and time this payment was made. Can be empty if this transaction is not a customer payment.", null=True, ), ), ( "payment_reference", models.IntegerField( blank=True, help_text="The reference for this payment. Can be empty if this transaction is not a customer payment.", null=True, ), ), ( "description", models.CharField( help_text="The description of this transaction.", max_length=100 ), ), ( "amount", models.DecimalField( decimal_places=2, help_text="The amount of this transaction", max_digits=12, ), ), ( "balance", models.DecimalField( decimal_places=2, help_text="Our balance in Zettles systems after this transaction.", max_digits=12, ), ), ], options={ "ordering": ["-statement_time"], "get_latest_by": ["statement_time"], }, ), migrations.CreateModel( name="ZettleReceipt", fields=[ ( "uuid", models.UUIDField( default=uuid.uuid4, editable=False, primary_key=True, serialize=False, ), ), ("created", models.DateTimeField(auto_now_add=True)), ("updated", models.DateTimeField(auto_now=True)), ( "zettle_created", models.DateTimeField( help_text="The date and time this receipt was created in Zettles end" ), ), ( "receipt_number", models.IntegerField(help_text="The Zettle receipt number."), ), ( "vat", models.DecimalField( decimal_places=2, help_text="The part of the total amount which is VAT", max_digits=12, ), ), ( "total", models.DecimalField( decimal_places=2, help_text="The total amount the customer paid", max_digits=12, ), ), ( "fee", models.DecimalField( decimal_places=2, help_text="The payment fee BornHack has to pay to receive this payment", max_digits=12, ), ), ( "net", models.DecimalField( decimal_places=2, help_text="The part of the payment which goes to BornHack after fees have been substracted.", max_digits=12, ), ), ( "payment_method", models.CharField(help_text="The payment method", max_length=100), ), ( "card_issuer", models.CharField( blank=True, help_text="The card issuer. Can be empty if this was not a card payment.", max_length=100, null=True, ), ), ( "staff", models.CharField( help_text="The Zettle account which was used to make this sale.", max_length=100, ), ), ( "description", models.CharField( help_text="The description of this transaction.", max_length=255 ), ), ("sold_via", models.CharField(help_text="Always POS?", max_length=100)), ], options={ "ordering": ["-zettle_created"], "get_latest_by": ["zettle_created"], }, ), ]
2.1875
2
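The migration above only creates the tables. As a rough, hypothetical sketch of how the resulting models might be queried afterwards (the `economy.models` import path is an assumption based on the app name in the migration, and the net/total/fee relationship is inferred from the help texts rather than taken from repository code):

```python
from economy.models import ZettleBalance, ZettleReceipt  # assumed module path

# Meta.get_latest_by is set in the migration, so latest() works without arguments.
latest_balance = ZettleBalance.objects.latest()
print(latest_balance.statement_time, latest_balance.balance)

# Receipts carry the gross total, the Zettle fee and the net payout, so the
# net column can be cross-checked (relationship implied by the help_text).
for receipt in ZettleReceipt.objects.order_by("-zettle_created")[:10]:
    assert receipt.net == receipt.total - receipt.fee
```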
main.py
vkumarma/Complete-Interpreter
0
191
<gh_stars>0
import re
import sys


class Lexer:
    def __init__(self, inp_str):
        self.index = 0
        self.s = inp_str

    def get_char(self):
        if self.index < len(self.s):
            var = self.s[self.index]
            self.index += 1
            return var


input_file = open(str(sys.argv[1]), 'r')  # Open file for reading
line = input_file.read()  # "if z then while x * 4 - 2 do skip endwhile else x := 7 endif; y := 1"
input_string = line.strip("\n")
lexer = Lexer(input_string)
hashtable = {}
tokens_list = []


def token_check(input):
    if re.fullmatch("if|then|else|endif|while|do|endwhile|skip", input):
        hashtable[input] = "KEYWORD"
        tokens_list.append(input)
    elif re.search("([a-z]|[A-Z])([a-z]|[A-Z]|[0-9])*", input):
        hashtable[input] = "IDENTIFIER"
        tokens_list.append(input)
    elif re.search("[0-9]+", input):
        hashtable[input] = "NUMBER"
        tokens_list.append(input)
    elif re.fullmatch("\+|\-|\*|/|\(|\)|:=|;", input):
        hashtable[input] = "SYMBOL"
        tokens_list.append(input)
    else:
        hashtable[input] = "ERROR READING"


def digit(curr_char, lexer):
    sub = ""
    while (curr_char.isdigit()):
        sub += curr_char
        curr_char = lexer.get_char()
        if curr_char == None:
            break
    new.append(curr_char)
    return sub


def longest_sub_string(curr_char, lexer):
    sub = ""
    while (curr_char.isalpha() or curr_char.isdigit()):
        sub += curr_char
        curr_char = lexer.get_char()
        if curr_char == None:
            break
    new.append(curr_char)
    return sub


def symbol(curr_char, lexer):
    # print(curr_char)
    sym = curr_char
    curr_char = lexer.get_char()
    new.append(curr_char)
    return sym


def assignment(curr_char, lexer):
    sub = curr_char
    next_char = lexer.get_char()
    if next_char == "=":
        sub += next_char
        new.append(next_char)
        return sub
    new.append(lexer.get_char())
    return sub


new = []
# keeping track of current char.
curr_char = lexer.get_char()
while (curr_char != None):
    while (curr_char == ' ' or curr_char == ''):
        curr_char = lexer.get_char()
    if (curr_char.isdigit()):
        token_check(digit(curr_char, lexer))
        curr_char = new.pop()
    elif (curr_char.isalpha()):
        token_check(longest_sub_string(curr_char, lexer))
        curr_char = new.pop()
    elif curr_char in "+-/*();":
        token_check(symbol(curr_char, lexer))
        curr_char = new.pop()
    elif curr_char == ":":
        token_check(assignment(curr_char, lexer))
        curr_char = new.pop()
        if curr_char == "=":
            curr_char = lexer.get_char()
    else:
        token_check(curr_char)
        curr_char = lexer.get_char()


def tokens():
    return hashtable

# print(tokens_list)
# print(tokens())
3.203125
3
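As a usage illustration for the tokenizer above (the input file name is hypothetical, and the expected structures are hand-traced from the code rather than captured output):

```python
# Running the script on a file containing the single line
#     x := 7 ; y := x * 4
# e.g.  python main.py program.txt
# should leave the module-level structures roughly as follows:
#
#   tokens_list == ['x', ':=', '7', ';', 'y', ':=', 'x', '*', '4']
#   hashtable   == {'x': 'IDENTIFIER', ':=': 'SYMBOL', '7': 'NUMBER',
#                   ';': 'SYMBOL', 'y': 'IDENTIFIER', '*': 'SYMBOL',
#                   '4': 'NUMBER'}
#
# tokens_list keeps every occurrence in order, while hashtable keeps one
# classification per distinct lexeme, which is what tokens() returns.
```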
deduplicate.py
Ghostofapacket/NewsGrabber-Deduplicate
0
192
<filename>deduplicate.py
import sys
sys.path.append('/usr/local/lib/python3.4/site-packages/')

from warc_dedup import deduplicate


def main():
    if len(sys.argv) == 1:
        raise Exception('Please provide the WARC file as argument.')
    deduplicate.Warc(*sys.argv[1:]).deduplicate()


if __name__ == '__main__':
    main()
2.3125
2
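A brief usage note for the wrapper above: it forwards its command-line arguments straight to `warc_dedup.deduplicate.Warc`, so the two invocations below are equivalent (the WARC file name is a made-up placeholder):

```python
# Command line:
#     python deduplicate.py crawl-00001.warc.gz
#
# Library form of the same call:
from warc_dedup import deduplicate

deduplicate.Warc('crawl-00001.warc.gz').deduplicate()
```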
build/lib/FinMesh/usgov/__init__.py
johnjdailey/FinMesh
1
193
<reponame>johnjdailey/FinMesh import os import requests import xmltodict import csv import json # # # # # # # # # # # FRED DATA BELOW # # # # # # # # # # # FRED_BASE_URL = 'https://api.stlouisfed.org/fred/' GEOFRED_BASE_URL = 'https://api.stlouisfed.org/geofred/' def append_fred_token(url): token = os.getenv('FRED_TOKEN') return f'{url}&api_key={token}' FRED_SERIES_OBS_URL = FRED_BASE_URL + 'series/observations?' def fred_series(series, file_type=None, realtime_start=None, realtime_end=None, limit=None, offset=None, sort_order=None, observation_start=None, observation_end=None, units=None, frequency=None, aggregation_method=None, output_type=None, vintage_dates=None): ## Returns time series historical data for the requested FRED data. url = FRED_SERIES_OBS_URL + f'series_id={series}' if file_type: url += f'&file_type={file_type}' if realtime_start: url += f'&realtime_start={realtime_start}' if realtime_end: url += f'&realtime_end={realtime_end}' if limit: url += f'&limit={limit}' if offset: url += f'&offset={offset}' if sort_order: url += f'&sort_order={sort_order}' if observation_start: url += f'&observation_start={observation_start}' if observation_end: url += f'&observation_end={observation_end}' if units: url += f'&units={units}' if frequency: url += f'&frequency={frequency}' if aggregation_method: url += f'&aggregation_method={aggregation_method}' if output_type: url += f'&output_type={output_type}' if vintage_dates: url += f'&vintage_dates={vintage_dates}' url = append_fred_token(url) result = requests.get(url) return result.text GEOFRED_SERIES_META_URL = GEOFRED_BASE_URL + 'series/group?' def geofred_series_meta(series_id, file_type=None): ## Returns meta data for the requested FRED data. url = GEOFRED_SERIES_META_URL + f'series_id={series_id}' if file_type: url += f'&file_type={file_type}' url = append_fred_token(url) result = requests.get(url) return result.text GEOFRED_REGIONAL_SERIES_URL = GEOFRED_BASE_URL + 'series/data?' def geofred_regional_series(series_id, file_type=None, date=None, start_date=None): ## Returns the historical, geographically organized time series data for the requested FRED data. url = GEOFRED_REGIONAL_SERIES_URL + f'series_id={series_id}' if file_type: url += f'&file_type={file_type}' if date: url += f'&date={date}' if start_date: url += f'&start_date={start_date}' url = append_fred_token(url) result = requests.get(url) return result.text # # # # # # # # # # # # # # # # # GOVERNMENT YIELD CURVE DATA # # # # # # # # # # # # # # # # # GOV_YIELD_URL = 'https://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData?$filter=month(NEW_DATE)%20eq%204%20and%20year(NEW_DATE)%20eq%202019' def get_yield(): ## Returns government treasury bond yields. Organized in Python dictionary format by bond length. # Formatting of XML to Python Dict curve = requests.get(GOV_YIELD_URL) parse_curve = xmltodict.parse(curve.content) # This is based around retrieving the n last dates or average of n days. feed = parse_curve['feed'] entry = feed['entry'] last_entry = len(entry)-1 content = entry[last_entry]['content']['m:properties'] # Dict that contains the whole yield curve so there is no need to bring in each rate. 
yield_curve_values = { 'date' : entry[last_entry]['content']['m:properties']['d:NEW_DATE']['#text'], '1month' : float(content['d:BC_1MONTH']['#text']), '2month' : float(content['d:BC_2MONTH']['#text']), '3month' : float(content['d:BC_3MONTH']['#text']), '6month' : float(content['d:BC_6MONTH']['#text']), '1year' : float(content['d:BC_1YEAR']['#text']), '2year' : float(content['d:BC_2YEAR']['#text']), '3year' : float(content['d:BC_3YEAR']['#text']), '5year' : float(content['d:BC_5YEAR']['#text']), '10year' : float(content['d:BC_10YEAR']['#text']), '20year' : float(content['d:BC_20YEAR']['#text']), '30year' : float(content['d:BC_30YEAR']['#text']), } return yield_curve_values
2.609375
3
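An illustrative call pattern for the helpers above (assumes the package is importable as `FinMesh.usgov` and that a `FRED_TOKEN` environment variable is set; the series id and dates are just examples):

```python
from FinMesh.usgov import fred_series, get_yield

# Historical observations for an example FRED series, returned as raw text
# (JSON here because of file_type='json'). Requires FRED_TOKEN in the environment.
gdp_raw = fred_series('GDP', file_type='json', observation_start='2015-01-01')

# Latest Treasury yield curve, parsed into a dict keyed by maturity.
curve = get_yield()
print(curve['date'], curve['10year'] - curve['2year'])  # simple 10y-2y spread
```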
settings.py
Cradac/mattermost-octane-integration
0
194
'''
This is the Settings File for the Mattermost-Octane Bridge.
You can change various variables here to customize and set up the client.
'''

'''----------------------Mattermost Webhook Configuration----------------------'''
#URL of the webhook from mattermost. To create one go to `Main Menu -> Integrations -> Incoming Webhooks` and press `Add Incoming Webhook`
mm_webhook_url = 'http://localhost:8065/hooks/yuro8xrfeffj787cj1bwc4ziue'

#Override the channel to send the notifications to, use the channel name as a String
mm_channel = None

#Set a custom Username to display in Mattermost
mm_username = 'Defect Notification'

#Set a custom Profile Image for the Client
mm_profileimage = 'https://i.imgur.com/7Wg3Tgs.png'  #Telekom T Image

#The latter two need to be enabled in the settings.json of the Mattermost server

'''----------------------------Flask Configuration----------------------------'''
#set external IP for the Flask Server to create a Webhook for ALM Octane
#local: 127.0.0.1 / False
#default external: 0.0.0.0 (will default to only available external address)
external_ip = False

#default: 5000
port = 5000

#external webhook verify token can be set here, if set as `None` it will be autogenerated & changed on each startup.
wh_token = None
1.632813
2
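For context, a hedged sketch of how these settings are typically consumed by the bridge (the `settings` import is assumed to resolve to this module; the payload fields follow Mattermost's incoming-webhook JSON format and the notification text is invented):

```python
import requests

import settings  # assumed to be the module above

payload = {
    'text': 'Defect D-1234 was updated',   # placeholder message
    'username': settings.mm_username,
    'icon_url': settings.mm_profileimage,
}
if settings.mm_channel:
    payload['channel'] = settings.mm_channel

requests.post(settings.mm_webhook_url, json=payload)
```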
python/testData/console/indent7.after.py
jnthn/intellij-community
2
195
print(1)
1.375
1
tools/aerial_detection.py
gfjiangly/AerialDetection
0
196
# -*- encoding:utf-8 -*- # @Time : 2021/1/3 15:15 # @Author : gfjiang import os.path as osp import mmcv import numpy as np import cvtools import matplotlib.pyplot as plt import cv2.cv2 as cv from functools import partial import torch import math from cvtools.utils.path import add_prefix_filename_suffix from mmdet.ops import nms from mmdet.apis import init_detector, inference_detector def draw_features(module, input, output, work_dir='./'): x = output.cpu().numpy() out_channels = list(output.shape)[1] height = int(math.sqrt(out_channels)) width = height if list(output.shape)[2] < 128: return fig = plt.figure(figsize=(32, 32)) fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05, hspace=0.05) for i in range(height * width): plt.subplot(height, width, i + 1) plt.axis('off') img = x[0, i, :, :] pmin = np.min(img) pmax = np.max(img) img = ((img - pmin) / (pmax - pmin + 0.000001))*255 # float在[0,1]之间,转换成0-255 img = img.astype(np.uint8) # 转成unit8 img = cv.applyColorMap(img, cv.COLORMAP_JET) # 生成heat map img = img[:, :, ::-1] # 注意cv2(BGR)和matplotlib(RGB)通道是相反的 plt.imshow(img) # print("{}/{}".format(i,width*height)) savename = get_image_name_for_hook(module, work_dir) fig.savefig(savename, dpi=100) fig.clf() plt.close() def get_image_name_for_hook(module, work_dir='./'): """ Generate image filename for hook function Parameters: ----------- module: module of neural network """ # os.makedirs(work_dir, exist_ok=True) module_name = str(module) base_name = module_name.split('(')[0] index = 0 image_name = '.' # '.' is surely exist, to make first loop condition True while osp.exists(image_name): index += 1 image_name = osp.join( work_dir, 'feats', '%s_%d.png' % (base_name, index)) return image_name class AerialDetectionOBB(object): def __init__(self, config, pth): self.imgs = [] self.cfg = mmcv.Config.fromfile(config) self.pth = pth print('loading model {} ...'.format(pth)) self.model = init_detector(self.cfg, self.pth, device='cuda:0') self.results = [] self.img_detected = [] # self.vis_feats((torch.nn.Conv2d, torch.nn.MaxPool2d)) def __call__(self, imgs_or_path, det_thrs=0.5, vis=False, vis_thr=0.5, save_root=''): if isinstance(imgs_or_path, str): self.imgs += cvtools.get_files_list(imgs_or_path) else: self.imgs += imgs_or_path prog_bar = mmcv.ProgressBar(len(self.imgs)) for _, img in enumerate(self.imgs): self.detect(img, det_thrs=det_thrs, vis=vis, vis_thr=vis_thr, save_root=save_root) prog_bar.update() def detect(self, img, det_thrs=0.5, vis=False, vis_thr=0.5, save_root=''): result = inference_detector(self.model, img) # result = self.nms(result) if isinstance(det_thrs, float): det_thrs = [det_thrs] * len(result) if vis: to_file = osp.join(save_root, osp.basename(img)) to_file = add_prefix_filename_suffix(to_file, suffix='_obb') self.vis(img, result, vis_thr=vis_thr, to_file=to_file) result = [det[det[..., -1] > det_thr] for det, det_thr in zip(result, det_thrs)] if len(result) == 0: print('detect: image {} has no object.'.format(img)) self.img_detected.append(img) self.results.append(result) return result def nms(self, result, nms_th=0.3): dets_num = [len(det_cls) for det_cls in result] result = np.vstack(result) _, ids = nms(result, nms_th) total_num = 0 nms_result = [] for num in dets_num: ids_cls = ids[np.where((total_num <= ids) & (ids < num))[0]] nms_result.append(result[ids_cls]) total_num += num return nms_result def vis(self, img, bbox_result, vis_thr=0.5, to_file='vis.jpg'): bboxes = np.vstack(bbox_result) labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, 
bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) inds = np.where(bboxes[:, -1] > vis_thr)[0] bboxes = bboxes[inds] labels = labels[inds] texts = [self.model.CLASSES[index]+'|'+str(round(bbox[-1], 2)) for index, bbox in zip(labels, bboxes)] img = cvtools.draw_boxes_texts( img, bboxes[:, :-1], box_format='polygon', line_width=2) cvtools.imwrite(img, to_file) def vis_feats(self, modules_for_plot): h, w = self.cfg.data.train.img_scale for name, module in self.model.named_modules(): if isinstance(module, modules_for_plot): draw_features_func = partial( draw_features, work_dir=self.cfg.work_dir) module.register_forward_hook(draw_features_func) def save_results(self, save): str_results = '' for i, img in enumerate(self.img_detected): result = self.results[i] img = osp.basename(img) for cls_index, dets in enumerate(result): cls = self.model.CLASSES[cls_index] for box in dets: bbox_str = ','.join(map(str, map(int, box[:4]))) str_results += ' '.join([img, cls, bbox_str]) + '\n' with open(save, 'w') as f: f.write(str_results) if __name__ == '__main__': config_file = 'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2.py' pth_file = 'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2/epoch_12.pth' detector = AerialDetectionOBB(config_file, pth_file) detector('/media/data/DOTA/crop/P2701_2926_1597_3949_2620.png', vis=True, save_root='work_dirs/attention_vis/') detector.save_results('work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2/detect_result.txt')
2.375
2
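A usage sketch for the detector class above, mirroring its `__main__` block (the image paths are placeholders; a single float threshold is broadcast to all classes inside `detect`):

```python
from tools.aerial_detection import AerialDetectionOBB  # assumed import path

detector = AerialDetectionOBB(
    'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2.py',
    'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2/epoch_12.pth')

# A list of image paths is used as-is; a string path would instead be
# expanded via cvtools.get_files_list.
detector(['P0001_crop.png', 'P0002_crop.png'],   # placeholder images
         det_thrs=0.3, vis=True, vis_thr=0.5,
         save_root='work_dirs/attention_vis/')

detector.save_results('work_dirs/detect_result.txt')
```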
tradingAPI/low_level.py
federico123579/Trading212-API
44
197
# -*- coding: utf-8 -*- """ tradingAPI.low_level ~~~~~~~~~~~~~~ This module provides the low level functions with the service. """ import time import re from datetime import datetime from pyvirtualdisplay import Display from bs4 import BeautifulSoup from splinter import Browser from .glob import Glob from .links import path from .utils import num, expect, get_pip # exceptions from tradingAPI import exceptions import selenium.common.exceptions # logging import logging logger = logging.getLogger('tradingAPI.low_level') class Stock(object): """base class for stocks""" def __init__(self, product): self.product = product self.market = True self.records = [] def new_rec(self, rec): """add a record""" self.records.append(rec) return self.records class Movement(object): """class-storing movement""" def __init__(self, product, quantity, mode, price): self.product = product self.quantity = quantity self.mode = mode self.price = price class PurePosition(object): """class-storing position""" def __init__(self, product, quantity, mode, price): self.product = product self.quantity = quantity self.mode = mode self.price = price def __repr__(self): return ' - '.join([str(self.product), str(self.quantity), str(self.mode), str(self.price)]) class LowLevelAPI(object): """low level api to interface with the service""" def __init__(self, brow="firefox"): self.brow_name = brow self.positions = [] self.movements = [] self.stocks = [] # init globals Glob() def launch(self): """launch browser and virtual display, first of all to be launched""" try: # init virtual Display self.vbro = Display() self.vbro.start() logger.debug("virtual display launched") except Exception: raise exceptions.VBroException() try: self.browser = Browser(self.brow_name) logger.debug(f"browser {self.brow_name} launched") except Exception: raise exceptions.BrowserException( self.brow_name, "failed to launch") return True def css(self, css_path, dom=None): """css find function abbreviation""" if dom is None: dom = self.browser return expect(dom.find_by_css, args=[css_path]) def css1(self, css_path, dom=None): """return the first value of self.css""" if dom is None: dom = self.browser def _css1(path, domm): """virtual local func""" return self.css(path, domm)[0] return expect(_css1, args=[css_path, dom]) def search_name(self, name, dom=None): """name find function abbreviation""" if dom is None: dom = self.browser return expect(dom.find_by_name, args=[name]) def xpath(self, xpath, dom=None): """xpath find function abbreviation""" if dom is None: dom = self.browser return expect(dom.find_by_xpath, args=[xpath]) def elCss(self, css_path, dom=None): """check if element is present by css""" if dom is None: dom = self.browser return expect(dom.is_element_present_by_css, args=[css_path]) def elXpath(self, xpath, dom=None): """check if element is present by css""" if dom is None: dom = self.browser return expect(dom.is_element_present_by_xpath, args=[xpath]) def login(self, username, password, mode="demo"): """login function""" url = "https://trading212.com/it/login" try: logger.debug(f"visiting %s" % url) self.browser.visit(url) logger.debug(f"connected to %s" % url) except selenium.common.exceptions.WebDriverException: logger.critical("connection timed out") raise try: self.search_name("login[username]").fill(username) self.search_name("login[password]").fill(password) self.css1(path['log']).click() # define a timeout for logging in timeout = time.time() + 30 while not self.elCss(path['logo']): if time.time() > timeout: logger.critical("login 
failed") raise CredentialsException(username) time.sleep(1) logger.info(f"logged in as {username}") # check if it's a weekend if mode == "demo" and datetime.now().isoweekday() in range(5, 8): timeout = time.time() + 10 while not self.elCss(path['alert-box']): if time.time() > timeout: logger.warning("weekend trading alert-box not closed") break if self.elCss(path['alert-box']): self.css1(path['alert-box']).click() logger.debug("weekend trading alert-box closed") except Exception as e: logger.critical("login failed") raise exceptions.BaseExc(e) return True def logout(self): """logout func (quit browser)""" try: self.browser.quit() except Exception: raise exceptions.BrowserException(self.brow_name, "not started") return False self.vbro.stop() logger.info("logged out") return True def get_bottom_info(self, info): accepted_values = { 'free_funds': 'equity-free', 'account_value': 'equity-total', 'live_result': 'equity-ppl', 'used_margin': 'equity-margin'} try: info_label = accepted_values[info] val = self.css1("div#%s span.equity-item-value" % info_label).text return num(val) except KeyError as e: raise exceptions.BaseExc(e) def get_price(self, name): soup = BeautifulSoup( self.css1("div.scrollable-area-content").html, "html.parser") for product in soup.select("div.tradebox"): fullname = product.select("span.instrument-name")[0].text.lower() if name.lower() in fullname: mark_closed_list = [x for x in product.select( "div.quantity-list-input-wrapper") if x.select( "div.placeholder")[0].text.lower().find("close") != -1] if mark_closed_list: sell_price = product.select("div.tradebox-price-sell")[0]\ .text return float(sell_price) else: return False class MovementWindow(object): """add movement window""" def __init__(self, api, product): self.api = api self.product = product self.state = 'initialized' self.insfu = False def open(self, name_counter=None): """open the window""" if self.api.css1(path['add-mov']).visible: self.api.css1(path['add-mov']).click() else: self.api.css1('span.dataTable-no-data-action').click() logger.debug("opened window") self.api.css1(path['search-box']).fill(self.product) if self.get_result(0) is None: self.api.css1(path['close']).click() raise exceptions.ProductNotFound(self.product) result, product = self.search_res(self.product, name_counter) result.click() if self.api.elCss("div.widget_message"): self.decode(self.api.css1("div.widget_message")) self.product = product self.state = 'open' def _check_open(self): if self.state == 'open': return True else: raise exceptions.WindowException() def close(self): """close a movement""" self._check_open() self.api.css1(path['close']).click() self.state = 'closed' logger.debug("closed window") def confirm(self): """confirm the movement""" self._check_open() self.get_price() self.api.css1(path['confirm-btn']).click() widg = self.api.css("div.widget_message") if widg: self.decode(widg[0]) raise exceptions.WidgetException(widg) if all(x for x in ['quantity', 'mode'] if hasattr(self, x)): self.api.movements.append(Movement( self.product, self.quantity, self.mode, self.price)) logger.debug("%s movement appended to the list" % self.product) self.state = 'conclused' logger.debug("confirmed movement") def search_res(self, res, check_counter=None): """search for a res""" logger.debug("searching result") result = self.get_result(0) name = self.get_research_name(result) x = 0 while not self.check_name(res, name, counter=check_counter): name = self.get_research_name(self.get_result(x)) if name is None: self.api.css1(path['close']).click() 
raise exceptions.ProductNotFound(res) logger.debug(name) if self.check_name(res, name, counter=check_counter): return self.get_result(x) x += 1 logger.debug("found product at position %d" % (x + 1)) return result, name def check_name(self, name, string, counter=None): """if both in string return False""" name = name.lower() string = string.lower() if counter is None: if name in string: return True else: return False counter = counter.lower() if name in string and counter in string: logger.debug("check_name: counter found in string") return False elif name in string and counter not in string: return True else: return False def get_research_name(self, res): """return result name""" if res is None: return None return self.api.css1("span.instrument-name", res).text def get_result(self, pos): """get pos result, where 0 is first""" evalxpath = path['res'] + f"[{pos + 1}]" try: res = self.api.xpath(evalxpath)[0] return res except Exception: return None def set_limit(self, category, mode, value): """set limit in movement window""" self._check_open() if (mode not in ["unit", "value"] or category not in ["gain", "loss", "both"]): raise ValueError() if not hasattr(self, 'stop_limit'): self.stop_limit = {'gain': {}, 'loss': {}} logger.debug("initialized stop_limit") if category == 'gain': self.api.xpath( path['limit-gain-%s' % mode])[0].fill(str(value)) elif category == 'loss': self.api.xpath( path['limit-loss-%s' % mode])[0].fill(str(value)) if category != 'both': self.stop_limit[category]['mode'] = mode self.stop_limit[category]['value'] = value elif category == 'both': self.api.xpath( path['limit-gain-%s' % mode])[0].fill(str(value)) self.api.xpath( path['limit-loss-%s' % mode])[0].fill(str(value)) for cat in ['gain', 'loss']: self.stop_limit[cat]['mode'] = mode self.stop_limit[cat]['value'] = value logger.debug("set limit") def decode(self, message): """decode text pop-up""" title = self.api.css1("div.title", message).text text = self.api.css1("div.text", message).text if title == "Insufficient Funds": self.insfu = True elif title == "Maximum Quantity Limit": raise exceptions.MaxQuantLimit(num(text)) elif title == "Minimum Quantity Limit": raise exceptions.MinQuantLimit(num(text)) logger.debug("decoded message") def decode_update(self, message, value, mult=0.1): """decode and update the value""" try: msg_text = self.api.css1("div.text", message).text return num(msg_text) except Exception: if msg_text.lower().find("higher") != -1: value += value * mult return value else: self.decode(message) return None def get_mov_margin(self): """get the margin of the movement""" self._check_open() return num(self.api.css1("span.cfd-order-info-item-value").text) def set_mode(self, mode): """set mode (buy or sell)""" self._check_open() if mode not in ["buy", "sell"]: raise ValueError() self.api.css1(path[mode + '-btn']).click() self.mode = mode logger.debug("mode set") def get_quantity(self): """gte current quantity""" self._check_open() quant = int(num(self.api.css1(path['quantity']).value)) self.quantity = quant return quant def set_quantity(self, quant): """set quantity""" self._check_open() self.api.css1(path['quantity']).fill(str(int(quant))) self.quantity = quant logger.debug("quantity set") def get_price(self, mode='buy'): """get current price""" if mode not in ['buy', 'sell']: raise ValueError() self._check_open() price = num(self.api.css1( "div.orderdialog div.tradebox-price-%s" % mode).text) self.price = price return price def get_unit_value(self): """get unit value of stock based on margin, memoized""" 
# find in the collection try: unit_value = Glob().theCollector.collection['unit_value'] unit_value_res = unit_value[self.product] logger.debug("unit_value found in the collection") return unit_value_res except KeyError: logger.debug("unit_value not found in the collection") pip = get_pip(mov=self) quant = 1 / pip if hasattr(self, 'quantity'): old_quant == self.quantity self.set_quantity(quant) # update the site time.sleep(0.5) margin = self.get_mov_margin() logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}") if 'old_quant' in locals(): self.set_quantity(old_quant) unit_val = margin / quant self.unit_value = unit_val Glob().unit_valueHandler.add_val({self.product: unit_val}) return unit_val def new_mov(self, name): """factory method pattern""" return self.MovementWindow(self, name) class Position(PurePosition): """position object""" def __init__(self, api, html_div): """initialized from div""" self.api = api if isinstance(html_div, type('')): self.soup_data = BeautifulSoup(html_div, 'html.parser') else: self.soup_data = html_div self.product = self.soup_data.select("td.name")[0].text self.quantity = num(self.soup_data.select("td.quantity")[0].text) if ("direction-label-buy" in self.soup_data.select("td.direction")[0].span['class']): self.mode = 'buy' else: self.mode = 'sell' self.price = num(self.soup_data.select("td.averagePrice")[0].text) self.margin = num(self.soup_data.select("td.margin")[0].text) self.id = self.find_id() def update(self, soup): """update the soup""" self.soup_data = soup return soup def find_id(self): """find pos ID with with given data""" pos_id = self.soup_data['id'] self.id = pos_id return pos_id @property def close_tag(self): """obtain close tag""" return f"#{self.id} div.close-icon" def close(self): """close position via tag""" self.api.css1(self.close_tag).click() try: self.api.xpath(path['ok_but'])[0].click() except selenium.common.exceptions.ElementNotInteractableException: if (self.api.css1('.widget_message div.title').text == 'Market Closed'): logger.error("market closed, position can't be closed") raise exceptions.MarketClosed() raise exceptions.WidgetException( self.api.css1('.widget_message div.text').text) # wait until it's been closed # set a timeout timeout = time.time() + 10 while self.api.elCss(self.close_tag): time.sleep(0.1) if time.time() > timeout: raise TimeoutError("failed to close pos %s" % self.id) logger.debug("closed pos %s" % self.id) def get_gain(self): """get current profit""" gain = num(self.soup_data.select("td.ppl")[0].text) self.gain = gain return gain def bind_mov(self): """bind the corresponding movement""" logger = logging.getLogger("tradingAPI.low_level.bind_mov") mov_list = [x for x in self.api.movements if x.product == self.product and x.quantity == self.quantity and x.mode == self.mode] if not mov_list: logger.debug("fail: mov not found") return None else: logger.debug("success: found movement") for x in mov_list: # find approximate price max_roof = self.price + self.price * 0.01 min_roof = self.price - self.price * 0.01 if min_roof < x.price < max_roof: logger.debug("success: price corresponding") # bind mov self.mov = x return x else: logger.debug("fail: price %f not corresponding to %f" % (self.price, x.price)) continue # if nothing, return None return None def new_pos(self, html_div): """factory method pattern""" pos = self.Position(self, html_div) pos.bind_mov() self.positions.append(pos) return pos
2.640625
3
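A minimal usage sketch of the tradingAPI record above, under stated assumptions: the package is importable as `tradingAPI`, the methods shown (`launch`, `login`, `new_mov`, `open`, `set_mode`, `set_quantity`, `confirm`, `get_bottom_info`, `logout`) are taken from the source, while the credentials and product name are placeholders and not part of the original file.

# Hypothetical walkthrough of LowLevelAPI; credentials and "EUR/USD" are placeholders.
from tradingAPI.low_level import LowLevelAPI

api = LowLevelAPI(brow="firefox")
api.launch()                                 # start virtual display + browser
api.login("demo-user", "demo-pass", mode="demo")

print(api.get_bottom_info('free_funds'))     # available equity, parsed with num()

mov = api.new_mov("EUR/USD")                 # factory for the nested MovementWindow
mov.open()
mov.set_mode("buy")
mov.set_quantity(1000)
mov.confirm()                                # appends a Movement to api.movements

api.logout()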
.infra/setup/playbooks/roles/ansible.kubernetes-modules/library/openshift_v1_build_config_list.py
cvicens/lab-knative
0
198
<reponame>cvicens/lab-knative #!/usr/bin/python # -*- coding: utf-8 -*- from ansible.module_utils.openshift_common import OpenShiftAnsibleModule, OpenShiftAnsibleException DOCUMENTATION = ''' module: openshift_v1_build_config_list short_description: OpenShift BuildConfigList description: - Retrieve a list of build_configs. List operations provide a snapshot read of the underlying objects, returning a resource_version representing a consistent version of the listed objects. version_added: 2.3.0 author: OpenShift (@openshift) options: api_key: description: - Token used to connect to the API. cert_file: description: - Path to a certificate used to authenticate with the API. type: path context: description: - The name of a context found in the Kubernetes config file. debug: description: - Enable debug output from the OpenShift helper. Logging info is written to KubeObjHelper.log default: false type: bool force: description: - If set to C(True), and I(state) is C(present), an existing object will updated, and lists will be replaced, rather than merged. default: false type: bool host: description: - Provide a URL for acessing the Kubernetes API. key_file: description: - Path to a key file used to authenticate with the API. type: path kubeconfig: description: - Path to an existing Kubernetes config file. If not provided, and no other connection options are provided, the openshift client will attempt to load the default configuration file from I(~/.kube/config.json). type: path password: description: - Provide a password for connecting to the API. Use in conjunction with I(username). resource_definition: description: - Provide the YAML definition for the object, bypassing any modules parameters intended to define object attributes. type: dict src: description: - Provide a path to a file containing the YAML definition of the object. Mutually exclusive with I(resource_definition). type: path ssl_ca_cert: description: - Path to a CA certificate used to authenticate with the API. type: path state: description: - Determines if an object should be created, patched, or deleted. When set to C(present), the object will be created, if it does not exist, or patched, if parameter values differ from the existing object's attributes, and deleted, if set to C(absent). A patch operation results in merging lists and updating dictionaries, with lists being merged into a unique set of values. If a list contains a dictionary with a I(name) or I(type) attribute, a strategic merge is performed, where individual elements with a matching I(name_) or I(type) are merged. To force the replacement of lists, set the I(force) option to C(True). default: present choices: - present - absent username: description: - Provide a username for connecting to the API. verify_ssl: description: - Whether or not to verify the API server's SSL certificates. type: bool requirements: - openshift == 0.3.3 ''' EXAMPLES = ''' ''' RETURN = ''' api_version: type: string description: Requested API version build_config_list: type: complex returned: when I(state) = C(present) contains: api_version: description: - APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. type: str items: description: - items is a list of build configs type: list contains: api_version: description: - APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. type: str kind: description: - Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. type: str metadata: description: - metadata for BuildConfig. type: complex contains: annotations: description: - Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. type: complex contains: str, str cluster_name: description: - The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. type: str creation_timestamp: description: - CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. type: complex contains: {} deletion_grace_period_seconds: description: - Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. type: int deletion_timestamp: description: - DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. type: complex contains: {} finalizers: description: - Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. type: list contains: str generate_name: description: - GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. type: str generation: description: - A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. type: int initializers: description: - An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects. When an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user. type: complex contains: pending: description: - Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients. type: list contains: name: description: - name of the process that is responsible for initializing this object. type: str result: description: - If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion. type: complex contains: api_version: description: - APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. type: str code: description: - Suggested HTTP return code for this status, 0 if not set. type: int details: description: - Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. type: complex contains: causes: description: - The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes. type: list contains: field: description: - 'The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional. Examples: "name" - the field "name" on the current resource "items[0].name" - the field "name" on the first array entry in "items"' type: str message: description: - A human-readable description of the cause of the error. This field may be presented as-is to a reader. type: str reason: description: - A machine-readable description of the cause of the error. If this value is empty there is no information available. 
type: str group: description: - The group attribute of the resource associated with the status StatusReason. type: str kind: description: - The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. type: str name: description: - The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described). type: str retry_after_seconds: description: - If specified, the time in seconds before the operation should be retried. type: int uid: description: - UID of the resource. (when there is a single resource which can be described). type: str kind: description: - Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. type: str message: description: - A human-readable description of the status of this operation. type: str metadata: description: - Standard list metadata. type: complex contains: resource_version: description: - String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. type: str self_link: description: - SelfLink is a URL representing this object. Populated by the system. Read-only. type: str reason: description: - A machine-readable description of why this operation is in the "Failure" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it. type: str status: description: - 'Status of the operation. One of: "Success" or "Failure".' type: str labels: description: - Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. type: complex contains: str, str name: description: - Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. type: str namespace: description: - Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. type: str owner_references: description: - List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. type: list contains: api_version: description: - API version of the referent. type: str block_owner_deletion: description: - If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. 
type: bool controller: description: - If true, this reference points to the managing controller. type: bool kind: description: - Kind of the referent. type: str name: description: - Name of the referent. type: str uid: description: - UID of the referent. type: str resource_version: description: - An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . type: str self_link: description: - SelfLink is a URL representing this object. Populated by the system. Read-only. type: str uid: description: - UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. type: str spec: description: - spec holds all the input necessary to produce a new build, and the conditions when to trigger them. type: complex contains: completion_deadline_seconds: description: - completionDeadlineSeconds is an optional duration in seconds, counted from the time when a build pod gets scheduled in the system, that the build may be active on a node before the system actively tries to terminate the build; value must be positive integer type: int failed_builds_history_limit: description: - failedBuildsHistoryLimit is the number of old failed builds to retain. If not specified, all failed builds are retained. type: int node_selector: description: - nodeSelector is a selector which must be true for the build pod to fit on a node If nil, it can be overridden by default build nodeselector values for the cluster. If set to an empty map or a map with any values, default build nodeselector values are ignored. type: complex contains: str, str output: description: - output describes the Docker image the Strategy should produce. type: complex contains: image_labels: description: - imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used. type: list contains: name: description: - name defines the name of the label. It must have non-zero length. type: str value: description: - value defines the literal value of the label. type: str push_secret: description: - PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub). type: complex contains: name: description: - Name of the referent. type: str to: description: - to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a Docker image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified. type: complex contains: api_version: description: - API version of the referent. type: str field_path: description: - 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.' type: str kind: description: - Kind of the referent. type: str name: description: - Name of the referent. type: str namespace: description: - Namespace of the referent. type: str resource_version: description: - Specific resourceVersion to which this reference is made, if any. type: str uid: description: - UID of the referent. type: str post_commit: description: - postCommit is a build hook executed after the build output image is committed, before it is pushed to a registry. type: complex contains: args: description: - args is a list of arguments that are provided to either Command, Script or the Docker image's default entrypoint. The arguments are placed immediately after the command to be run. type: list contains: str command: description: - command is the command to run. It may not be specified with Script. This might be needed if the image doesn't have `/bin/sh`, or if you do not want to use a shell. In all other cases, using Script might be more convenient. type: list contains: str script: description: - script is a shell script to be run with `/bin/sh -ic`. It may not be specified with Command. Use Script when a shell script is appropriate to execute the post build hook, for example for running unit tests with `rake test`. If you need control over the image entrypoint, or if the image does not have `/bin/sh`, use Command and/or Args. The `-i` flag is needed to support CentOS and RHEL images that use Software Collections (SCL), in order to have the appropriate collections enabled in the shell. E.g., in the Ruby image, this is necessary to make `ruby`, `bundle` and other binaries available in the PATH. type: str resources: description: - resources computes resource requirements to execute the build. type: complex contains: limits: description: - Limits describes the maximum amount of compute resources allowed. type: complex contains: str, str requests: description: - Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. type: complex contains: str, str revision: description: - revision is the information from the source for a specific repo snapshot. This is optional. 
type: complex contains: git: description: - Git contains information about git-based build source type: complex contains: author: description: - author is the author of a specific commit type: complex contains: email: description: - email of the source control user type: str name: description: - name of the source control user type: str commit: description: - commit is the commit hash identifying a specific commit type: str committer: description: - committer is the committer of a specific commit type: complex contains: email: description: - email of the source control user type: str name: description: - name of the source control user type: str message: description: - message is the description of a specific commit type: str type: description: - type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images' type: str run_policy: description: - RunPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to "Serial". type: str service_account: description: - serviceAccount is the name of the ServiceAccount to use to run the pod created by this build. The pod will be allowed to use secrets referenced by the ServiceAccount type: str source: description: - source describes the SCM in use. type: complex contains: binary: description: - binary builds accept a binary as their input. The binary is generally assumed to be a tar, gzipped tar, or zip file depending on the strategy. For Docker builds, this is the build context and an optional Dockerfile may be specified to override any Dockerfile in the build context. For Source builds, this is assumed to be an archive as described above. For Source and Docker builds, if binary.asFile is set the build will receive a directory with a single file. contextDir may be used when an archive is provided. Custom builds will receive this binary as input on STDIN. type: complex contains: as_file: description: - asFile indicates that the provided binary input should be considered a single file within the build input. For example, specifying "webapp.war" would place the provided binary as `/webapp.war` for the builder. If left empty, the Docker and Source build strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. The custom strategy receives this binary as standard input. This filename may not contain slashes or be '..' or '.'. type: str context_dir: description: - contextDir specifies the sub-directory where the source code for the application exists. This allows to have buildable sources in directory other than root of repository. type: str dockerfile: description: - dockerfile is the raw contents of a Dockerfile which should be built. When this option is specified, the FROM may be modified based on your strategy base image and additional ENV stanzas from your strategy environment will be added after the FROM, but before the rest of your Dockerfile stanzas. The Dockerfile source type may be used with other options like git - in those cases the Git repo will have any innate Dockerfile replaced in the context dir. 
type: str git: description: - git contains optional information about git build source type: complex contains: http_proxy: description: - httpProxy is a proxy used to reach the git repository over http type: str https_proxy: description: - httpsProxy is a proxy used to reach the git repository over https type: str no_proxy: description: - noProxy is the list of domains for which the proxy should not be used type: str ref: description: - ref is the branch/tag/ref to build. type: str uri: description: - uri points to the source that will be built. The structure of the source will depend on the type of build to run type: str images: description: - images describes a set of images to be used to provide source for the build type: list contains: _from: description: - from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from. type: complex contains: api_version: description: - API version of the referent. type: str field_path: description: - 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.' type: str kind: description: - Kind of the referent. type: str name: description: - Name of the referent. type: str namespace: description: - Namespace of the referent. type: str resource_version: description: - Specific resourceVersion to which this reference is made, if any. type: str uid: description: - UID of the referent. type: str paths: description: - paths is a list of source and destination paths to copy from the image. type: list contains: destination_dir: description: - destinationDir is the relative directory within the build directory where files copied from the image are placed. type: str source_path: description: - sourcePath is the absolute path of the file or directory inside the image to copy to the build directory. If the source path ends in /. then the content of the directory will be copied, but the directory itself will not be created at the destination. type: str pull_secret: description: - pullSecret is a reference to a secret to be used to pull the image from a registry If the image is pulled from the OpenShift registry, this field does not need to be set. type: complex contains: name: description: - Name of the referent. type: str secrets: description: - secrets represents a list of secrets and their destinations that will be used only for the build. type: list contains: destination_dir: description: - destinationDir is the directory where the files from the secret should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. Later, when the script finishes, all files injected will be truncated to zero length. For the Docker build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during docker build. type: str secret: description: - secret is a reference to an existing secret that you want to use in your build. type: complex contains: name: description: - Name of the referent. 
type: str source_secret: description: - "sourceSecret is the name of a Secret that would be used for setting\ \ up the authentication for cloning private repository. The secret\ \ contains valid credentials for remote repository, where the\ \ data's key represent the authentication method to be used and\ \ value is the base64 encoded credentials. Supported auth methods\ \ are: ssh-privatekey." type: complex contains: name: description: - Name of the referent. type: str type: description: - type of build input to accept type: str strategy: description: - strategy defines how to perform a build. type: complex contains: custom_strategy: description: - customStrategy holds the parameters to the Custom build strategy type: complex contains: _from: description: - from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which the docker image should be pulled type: complex contains: api_version: description: - API version of the referent. type: str field_path: description: - 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.' type: str kind: description: - Kind of the referent. type: str name: description: - Name of the referent. type: str namespace: description: - Namespace of the referent. type: str resource_version: description: - Specific resourceVersion to which this reference is made, if any. type: str uid: description: - UID of the referent. type: str build_api_version: description: - buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder type: str env: description: - env contains additional environment variables you want to pass into a builder container. type: list contains: name: description: - Name of the environment variable. Must be a C_IDENTIFIER. type: str value: description: - 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' type: str value_from: description: - Source for the environment variable's value. Cannot be used if value is not empty. type: complex contains: config_map_key_ref: description: - Selects a key of a ConfigMap. type: complex contains: key: description: - The key to select. type: str name: description: - Name of the referent. type: str optional: description: - Specify whether the ConfigMap or it's key must be defined type: bool field_ref: description: - 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.' type: complex contains: api_version: description: - Version of the schema the FieldPath is written in terms of, defaults to "v1". 
type: str field_path: description: - Path of the field to select in the specified API version. type: str resource_field_ref: description: - 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' type: complex contains: container_name: description: - 'Container name: required for volumes, optional for env vars' type: str divisor: description: - Specifies the output format of the exposed resources, defaults to "1" type: str resource: description: - 'Required: resource to select' type: str secret_key_ref: description: - Selects a key of a secret in the pod's namespace type: complex contains: key: description: - The key of the secret to select from. Must be a valid secret key. type: str name: description: - Name of the referent. type: str optional: description: - Specify whether the Secret or it's key must be defined type: bool expose_docker_socket: description: - exposeDockerSocket will allow running Docker commands (and build Docker images) from inside the Docker container. type: bool force_pull: description: - forcePull describes if the controller should configure the build pod to always pull the images for the builder or only pull if it is not present locally type: bool pull_secret: description: - pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the Docker images from the private Docker registries type: complex contains: name: description: - Name of the referent. type: str secrets: description: - secrets is a list of additional secrets that will be included in the build pod type: list contains: mount_path: description: - mountPath is the path at which to mount the secret type: str secret_source: description: - secretSource is a reference to the secret type: complex contains: name: description: - Name of the referent. type: str docker_strategy: description: - dockerStrategy holds the parameters to the Docker build strategy. type: complex contains: _from: description: - from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which the docker image should be pulled the resulting image will be used in the FROM line of the Dockerfile for this build. type: complex contains: api_version: description: - API version of the referent. type: str field_path: description: - 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.' type: str kind: description: - Kind of the referent. type: str name: description: - Name of the referent. type: str namespace: description: - Namespace of the referent. type: str resource_version: description: - Specific resourceVersion to which this reference is made, if any. type: str uid: description: - UID of the referent. type: str build_args: description: - buildArgs contains build arguments that will be resolved in the Dockerfile. See type: list contains: name: description: - Name of the environment variable. Must be a C_IDENTIFIER. 
type: str value: description: - 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' type: str value_from: description: - Source for the environment variable's value. Cannot be used if value is not empty. type: complex contains: config_map_key_ref: description: - Selects a key of a ConfigMap. type: complex contains: key: description: - The key to select. type: str name: description: - Name of the referent. type: str optional: description: - Specify whether the ConfigMap or it's key must be defined type: bool field_ref: description: - 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.' type: complex contains: api_version: description: - Version of the schema the FieldPath is written in terms of, defaults to "v1". type: str field_path: description: - Path of the field to select in the specified API version. type: str resource_field_ref: description: - 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' type: complex contains: container_name: description: - 'Container name: required for volumes, optional for env vars' type: str divisor: description: - Specifies the output format of the exposed resources, defaults to "1" type: str resource: description: - 'Required: resource to select' type: str secret_key_ref: description: - Selects a key of a secret in the pod's namespace type: complex contains: key: description: - The key of the secret to select from. Must be a valid secret key. type: str name: description: - Name of the referent. type: str optional: description: - Specify whether the Secret or it's key must be defined type: bool dockerfile_path: description: - dockerfilePath is the path of the Dockerfile that will be used to build the Docker image, relative to the root of the context (contextDir). type: str env: description: - env contains additional environment variables you want to pass into a builder container. type: list contains: name: description: - Name of the environment variable. Must be a C_IDENTIFIER. type: str value: description: - 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' type: str value_from: description: - Source for the environment variable's value. Cannot be used if value is not empty. type: complex contains: config_map_key_ref: description: - Selects a key of a ConfigMap. type: complex contains: key: description: - The key to select. type: str name: description: - Name of the referent. 
type: str optional: description: - Specify whether the ConfigMap or it's key must be defined type: bool field_ref: description: - 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.' type: complex contains: api_version: description: - Version of the schema the FieldPath is written in terms of, defaults to "v1". type: str field_path: description: - Path of the field to select in the specified API version. type: str resource_field_ref: description: - 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' type: complex contains: container_name: description: - 'Container name: required for volumes, optional for env vars' type: str divisor: description: - Specifies the output format of the exposed resources, defaults to "1" type: str resource: description: - 'Required: resource to select' type: str secret_key_ref: description: - Selects a key of a secret in the pod's namespace type: complex contains: key: description: - The key of the secret to select from. Must be a valid secret key. type: str name: description: - Name of the referent. type: str optional: description: - Specify whether the Secret or it's key must be defined type: bool force_pull: description: - forcePull describes if the builder should pull the images from registry prior to building. type: bool image_optimization_policy: description: - imageOptimizationPolicy describes what optimizations the system can use when building images to reduce the final size or time spent building the image. The default policy is 'None' which means the final build image will be equivalent to an image created by the Docker build API. The experimental policy 'SkipLayers' will avoid commiting new layers in between each image step, and will fail if the Dockerfile cannot provide compatibility with the 'None' policy. An additional experimental policy 'SkipLayersAndWarn' is the same as 'SkipLayers' but simply warns if compatibility cannot be preserved. type: str no_cache: description: - noCache if set to true indicates that the docker build must be executed with the --no-cache=true flag type: bool pull_secret: description: - pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the Docker images from the private Docker registries type: complex contains: name: description: - Name of the referent. type: str jenkins_pipeline_strategy: description: - JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. This strategy is in tech preview. type: complex contains: env: description: - env contains additional environment variables you want to pass into a build pipeline. type: list contains: name: description: - Name of the environment variable. Must be a C_IDENTIFIER. type: str value: description: - 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' type: str value_from: description: - Source for the environment variable's value. Cannot be used if value is not empty. 
type: complex contains: config_map_key_ref: description: - Selects a key of a ConfigMap. type: complex contains: key: description: - The key to select. type: str name: description: - Name of the referent. type: str optional: description: - Specify whether the ConfigMap or it's key must be defined type: bool field_ref: description: - 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.' type: complex contains: api_version: description: - Version of the schema the FieldPath is written in terms of, defaults to "v1". type: str field_path: description: - Path of the field to select in the specified API version. type: str resource_field_ref: description: - 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' type: complex contains: container_name: description: - 'Container name: required for volumes, optional for env vars' type: str divisor: description: - Specifies the output format of the exposed resources, defaults to "1" type: str resource: description: - 'Required: resource to select' type: str secret_key_ref: description: - Selects a key of a secret in the pod's namespace type: complex contains: key: description: - The key of the secret to select from. Must be a valid secret key. type: str name: description: - Name of the referent. type: str optional: description: - Specify whether the Secret or it's key must be defined type: bool jenkinsfile: description: - Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. type: str jenkinsfile_path: description: - JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir. type: str source_strategy: description: - sourceStrategy holds the parameters to the Source build strategy. type: complex contains: _from: description: - from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which the docker image should be pulled type: complex contains: api_version: description: - API version of the referent. type: str field_path: description: - 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.' type: str kind: description: - Kind of the referent. type: str name: description: - Name of the referent. type: str namespace: description: - Namespace of the referent. type: str resource_version: description: - Specific resourceVersion to which this reference is made, if any. type: str uid: description: - UID of the referent. type: str env: description: - env contains additional environment variables you want to pass into a builder container. type: list contains: name: description: - Name of the environment variable. Must be a C_IDENTIFIER. 
type: str value: description: - 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' type: str value_from: description: - Source for the environment variable's value. Cannot be used if value is not empty. type: complex contains: config_map_key_ref: description: - Selects a key of a ConfigMap. type: complex contains: key: description: - The key to select. type: str name: description: - Name of the referent. type: str optional: description: - Specify whether the ConfigMap or it's key must be defined type: bool field_ref: description: - 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.' type: complex contains: api_version: description: - Version of the schema the FieldPath is written in terms of, defaults to "v1". type: str field_path: description: - Path of the field to select in the specified API version. type: str resource_field_ref: description: - 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' type: complex contains: container_name: description: - 'Container name: required for volumes, optional for env vars' type: str divisor: description: - Specifies the output format of the exposed resources, defaults to "1" type: str resource: description: - 'Required: resource to select' type: str secret_key_ref: description: - Selects a key of a secret in the pod's namespace type: complex contains: key: description: - The key of the secret to select from. Must be a valid secret key. type: str name: description: - Name of the referent. type: str optional: description: - Specify whether the Secret or it's key must be defined type: bool force_pull: description: - forcePull describes if the builder should pull the images from registry prior to building. type: bool incremental: description: - incremental flag forces the Source build to do incremental builds if true. type: bool pull_secret: description: - pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the Docker images from the private Docker registries type: complex contains: name: description: - Name of the referent. type: str runtime_artifacts: description: - 'runtimeArtifacts specifies a list of source/destination pairs that will be copied from the builder to the runtime image. sourcePath can be a file or directory. destinationDir must be a directory. destinationDir can also be empty or equal to ".", in this case it just refers to the root of WORKDIR. Deprecated: This feature will be removed in a future release. Use ImageSource to copy binary artifacts created from one build into a separate runtime image.' type: list contains: destination_dir: description: - destinationDir is the relative directory within the build directory where files copied from the image are placed. type: str source_path: description: - sourcePath is the absolute path of the file or directory inside the image to copy to the build directory. If the source path ends in /. 
then the content of the directory will be copied, but the directory itself will not be created at the destination. type: str runtime_image: description: - 'runtimeImage is an optional image that is used to run an application without unneeded dependencies installed. The building of the application is still done in the builder image but, post build, you can copy the needed artifacts in the runtime image for use. Deprecated: This feature will be removed in a future release. Use ImageSource to copy binary artifacts created from one build into a separate runtime image.' type: complex contains: api_version: description: - API version of the referent. type: str field_path: description: - 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.' type: str kind: description: - Kind of the referent. type: str name: description: - Name of the referent. type: str namespace: description: - Namespace of the referent. type: str resource_version: description: - Specific resourceVersion to which this reference is made, if any. type: str uid: description: - UID of the referent. type: str scripts: description: - scripts is the location of Source scripts type: str type: description: - type is the kind of build strategy. type: str successful_builds_history_limit: description: - successfulBuildsHistoryLimit is the number of old successful builds to retain. If not specified, all successful builds are retained. type: int triggers: description: - triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation. type: list contains: bitbucket: description: - BitbucketWebHook contains the parameters for a Bitbucket webhook type of trigger type: complex contains: allow_env: description: - allowEnv determines whether the webhook can set environment variables; can only be set to true for GenericWebHook. type: bool secret: description: - secret used to validate requests. type: str generic: description: - generic contains the parameters for a Generic webhook type of trigger type: complex contains: allow_env: description: - allowEnv determines whether the webhook can set environment variables; can only be set to true for GenericWebHook. type: bool secret: description: - secret used to validate requests. type: str github: description: - github contains the parameters for a GitHub webhook type of trigger type: complex contains: allow_env: description: - allowEnv determines whether the webhook can set environment variables; can only be set to true for GenericWebHook. type: bool secret: description: - secret used to validate requests. type: str gitlab: description: - GitLabWebHook contains the parameters for a GitLab webhook type of trigger type: complex contains: allow_env: description: - allowEnv determines whether the webhook can set environment variables; can only be set to true for GenericWebHook. type: bool secret: description: - secret used to validate requests. 
type: str image_change: description: - imageChange contains parameters for an ImageChange type of trigger type: complex contains: _from: description: - from is a reference to an ImageStreamTag that will trigger a build when updated It is optional. If no From is specified, the From image from the build strategy will be used. Only one ImageChangeTrigger with an empty From reference is allowed in a build configuration. type: complex contains: api_version: description: - API version of the referent. type: str field_path: description: - 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.' type: str kind: description: - Kind of the referent. type: str name: description: - Name of the referent. type: str namespace: description: - Namespace of the referent. type: str resource_version: description: - Specific resourceVersion to which this reference is made, if any. type: str uid: description: - UID of the referent. type: str last_triggered_image_id: description: - lastTriggeredImageID is used internally by the ImageChangeController to save last used image ID for build type: str type: description: - type is the type of build trigger type: str status: description: - status holds any relevant information about a build config type: complex contains: last_version: description: - lastVersion is used to inform about number of last triggered build. type: int kind: description: - Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. type: str metadata: description: - metadata for BuildConfigList. type: complex contains: resource_version: description: - String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. type: str self_link: description: - SelfLink is a URL representing this object. Populated by the system. Read-only. type: str ''' def main(): try: module = OpenShiftAnsibleModule('build_config_list', 'v1') except OpenShiftAnsibleException as exc: # The helper failed to init, so there is no module object. All we can do is raise the error. raise Exception(exc.message) try: module.execute_module() except OpenShiftAnsibleException as exc: module.fail_json(msg="Module failed!", error=str(exc)) if __name__ == '__main__': main()
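# --- Illustrative consumer sketch (editor's addition, not part of the generated module) ---
# The RETURN block above documents build configs in snake_case form, including a
# 'triggers' list whose webhook entries (github/gitlab/bitbucket/generic) each carry
# a 'secret'. The helper below is a hypothetical example of walking that documented
# structure once the lookup has produced results; the sample dict in the docstring is
# made up and only mirrors the field names described above.

def collect_webhook_secrets(build_config):
    """Return (trigger_type, secret) pairs found in a build config's triggers.

    Example (hypothetical data shaped like one documented result item):

        >>> bc = {'triggers': [{'type': 'GitHub',
        ...                     'github': {'secret': 'example-secret'}}]}
        >>> collect_webhook_secrets(bc)
        [('GitHub', 'example-secret')]
    """
    pairs = []
    for trigger in build_config.get('triggers') or []:
        # Only webhook-style triggers carry a secret; image_change triggers do not.
        for hook_key in ('github', 'gitlab', 'bitbucket', 'generic'):
            hook = trigger.get(hook_key)
            if hook:
                pairs.append((trigger.get('type'), hook.get('secret')))
    return pairs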
1.765625
2
aws-regions.py
groorj/cloud-regions
0
199
<reponame>groorj/cloud-regions import json import logging import os import inspect import urllib import urllib.request from urllib.error import HTTPError # logger logger = logging.getLogger() logger_level = logging.getLevelName(os.environ['LOGGER_LEVEL']) logger.setLevel(logger_level) # validate access def validate_access(event, context): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) logger.debug("RESTRICTED_ACCESS_ENABLED: [%s]", os.environ['RESTRICTED_ACCESS_ENABLED']) error_message = "You are not allowed, get out!" if os.environ['RESTRICTED_ACCESS_ENABLED'] == 'true': logger.info("Restricted access is enabled") logger.info("Value for header [%s] is: [%s]", os.environ['RESTRICTED_ACCESS_HTTP_HEADER'], event["headers"][os.environ['RESTRICTED_ACCESS_HTTP_HEADER']]) if event["headers"][os.environ['RESTRICTED_ACCESS_HTTP_HEADER']] != os.environ['RESTRICTED_ACCESS_SECRET']: logger.info("Key provided is not valid") logger.debug("Error: [%s]", error_message) http_code = 403 raise ValueError(http_code, error_message) else: logger.info("Key provided is valid") else: logger.info("Restricted access is NOT enabled") # create response def create_response_new(status_code, message_body): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) return { 'statusCode': str(status_code), 'body': json.dumps(message_body), 'headers': { 'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*' }, } # download json file def get_json(): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) try: response = urllib.request.urlopen(os.environ['AWS_REGIONS_JSON_URL']) except HTTPError as err: # catch HTTP error logger.debug("HTTP error: [%s]", err) raise json_data = json.loads(response.read()) return json_data # entry point -> return region info def get_region_info(event, context): logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name) return_info_final = {} # validate the access to this resource try: validate_access(event, context) except ValueError as err: return_info_final['request'] = { "request_status": "Fail", "error_message": err.args[1], "http_error_code": err.args[0] } return create_response_new(err.args[0], return_info_final) # get region info region_code = event['pathParameters']['region_code'] logger.debug("region_code: [%s]", region_code) try: json_data = get_json() except HTTPError as err: # http_code = err.code http_code = 500 return_info_final['request'] = { "request_status": "Fail", "error_message": "Error getting Regions information.", "http_error_code": err.code } return create_response_new(http_code, return_info_final) # logger.debug("json_data: [%s]", json_data) # logger.debug("type(json_data): [%s]", type(json_data)) for element in json_data['data']: # logger.debug("code: [%s] && region_code: [%s]", element['code'], region_code) if element['code'] == region_code: logger.info("region_code found") http_code = 200 return_info_final['request'] = { "request_status": "Success" } return_info_final['info'] = json_data['info'] return_info_final['data'] = element break else: logger.info("region_code NOT found") return_info = "Region code NOT found." 
        http_code = 404
        return_info_final['request'] = { "request_status": "Fail", "error_message": return_info, "http_error_code": http_code }
    return create_response_new(http_code, return_info_final)

# entry point -> return all regions info
def get_all_regions_info(event, context):
    logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name)
    return_info_final = {}
    # validate the access to this resource
    try:
        validate_access(event, context)
    except ValueError as err:
        return_info_final['request'] = { "request_status": "Fail", "error_message": err.args[1], "http_error_code": err.args[0] }
        return create_response_new(err.args[0], return_info_final)
    # get regions info
    try:
        json_data = get_json()
    except HTTPError as err:
        # http_code = err.code
        http_code = 500
        return_info_final['request'] = { "request_status": "Fail", "error_message": "Error getting Regions information.", "http_error_code": err.code }
        return create_response_new(http_code, return_info_final)
    logger.debug("json_data: [%s]", json_data)
    http_code = 200
    return_info_final['request'] = { "request_status": "Success" }
    return_info_final['info'] = json_data['info']
    return_info_final['data'] = json_data['data']
    return create_response_new(http_code, return_info_final)

# End;
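# --- Illustrative local invocation (editor's sketch, not part of the deployed Lambda) ---
# The event below mimics the API Gateway proxy payload these handlers expect:
# 'pathParameters' carrying the region code and 'headers' carrying the optional
# restricted-access header. It assumes the environment variables read above
# (LOGGER_LEVEL, RESTRICTED_ACCESS_ENABLED, RESTRICTED_ACCESS_HTTP_HEADER,
# RESTRICTED_ACCESS_SECRET, AWS_REGIONS_JSON_URL) are already exported; the header
# name and secret shown here are placeholders. Guarding with __main__ keeps the
# sketch inert when Lambda imports this module.
if __name__ == '__main__':
    fake_event = {
        'pathParameters': {'region_code': 'us-east-1'},
        'headers': {'x-api-key': 'placeholder-secret'},
    }
    # context is not used by the handlers, so None is enough for a local run
    print(get_region_info(fake_event, None))
    print(get_all_regions_info(fake_event, None))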
2.328125
2