{"seq_id": "31760171912", "text": "from sqlite3 import connect\n\n\ndef add_words(cur, new):\n cur.execute('select * from words where words.word = ?', (new['keywords'],))\n res = cur.fetchone()\n print(res)\n if res:\n if res[2] < new['count']:\n # обновление строки таблицы\n cur.execute('update words set count = ?, up = ?, down = ? where words.id = ?',\n (new['count'], new['up'], new['down'], res[0]))\n print('Edit')\n else:\n print('Not edit')\n else:\n # добавление строки в таблице\n cur.execute('insert into words values (null, ?, ?, ?, ?)',\n (new['keywords'], new['count'], new['up'], new['down']))\n print('Done')\n return cur\n\n\ndef add_skills(cur, new):\n for item in new['requirements']:\n res = cur.execute('select * from skills where skills.name = ?', (item['name'],))\n if not res.fetchone():\n print(item['name'])\n cur.execute('insert into skills values (null, ?)', (item['name'],))\n return cur\n\n\ndef add_ws(cur, new):\n cur.execute('select id, count from words where words.word = ?', (new['keywords'],))\n word_id, word_count = cur.fetchone()\n for item in new['requirements']:\n cur.execute('select id from skills where skills.name = ?', (item['name'],))\n skill_id = cur.fetchone()[0]\n print(word_id, skill_id)\n cur.execute('select * from wordskills as ws where ws.id_word = ? and ws.id_skill = ?',\n (word_id, skill_id))\n res = cur.fetchone()\n if not res:\n cur.execute('insert into wordskills values (null, ?, ?, ?, ?)',\n (word_id, skill_id, item['count'], item['percent']))\n print('ws done')\n elif word_count < new['count']:\n cur.execute('update wordskills as ws set count = ?, percent = ? where ws.id_word = ? and ws.id_skill = ?',\n (item['count'], item['percent'], word_id, skill_id))\n print('ws edit')\n print('ws not edit')\n return cur\n\n\ndef add_row(new):\n con = connect('base.db')\n cur = con.cursor()\n cur = add_words(cur, new)\n cur = add_skills(cur, new)\n cur = add_ws(cur, new)\n con.commit()\n con.close()\n", "repo_name": "nemu-haibane/python17", "sub_path": "crud.py", "file_name": "crud.py", "file_ext": "py", "file_size_in_byte": 2295, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sqlite3.connect", "line_number": 56, "usage_type": "call"}]} {"seq_id": "42989466623", "text": "from geopy.geocoders import Nominatim\nfrom geopy.distance import geodesic\nfrom pprint import pprint\n\ngeolocator = Nominatim(user_agent=\"snakes-distance-geopy\")\nschool = geolocator.geocode(\"raffles institution singapore\")\n\nprint('Snakey school is located at {}'.format(school.address))\nprint('Snakey school\\'s coordinates are ({}, {}) '.format(\n school.latitude, school.longitude))\n\nhome = geolocator.geocode(\"58 college green singapore\")\nprint('Home is located at {}'.format(home.address))\n\nhome_coordinates = home.latitude, home.longitude\nschool_coordinates = school.latitude, school.longitude\ndistance = geodesic(home_coordinates, school_coordinates).km\nprint('The distance from home to school is {:.2f} km'.format(distance))", "repo_name": "siowyisheng/30-things-in-python", "sub_path": "22-geo-calculate-distance/snakes_distance.py", "file_name": "snakes_distance.py", "file_ext": "py", "file_size_in_byte": 730, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "41", "api": [{"api_name": "geopy.geocoders.Nominatim", "line_number": 5, "usage_type": "call"}, {"api_name": "geopy.distance.geodesic", "line_number": 17, "usage_type": "call"}]} 
{"seq_id": "36825740582", "text": "\"\"\"Add Ingredients table\n\nRevision ID: 28b34376bb94\nRevises: d3685ce0e0db\nCreate Date: 2021-10-25 21:24:17.530543\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '28b34376bb94'\ndown_revision = 'd3685ce0e0db'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n \"ingredients\",\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"name\", sa.String(40), nullable=False),\n sa.Column(\"unit\", sa.String(25)),\n sa.Column(\"quantity\", sa.Integer(), nullable=True),\n sa.Column(\"recipe_id\", sa.Integer(), sa.ForeignKey(\"recipe.id\"))\n )\n\n\ndef downgrade():\n op.drop_table(\"ingredients\")\n", "repo_name": "alucardthefish/RecipesApi", "sub_path": "alembic/versions/28b34376bb94_add_ingredients_table.py", "file_name": "28b34376bb94_add_ingredients_table.py", "file_ext": "py", "file_size_in_byte": 712, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "alembic.op.create_table", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 20, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}]} {"seq_id": "21223952918", "text": "#Algoritham to create a program which performs three essential operations\n#Open the file and process each line.\n#Either add each word to the dictionary with a frequency of 1 or update the word’s count by 1.\n#Nicely print the output, in this case from high to low frequency.\n#DSC510-T303 Introduction to Programming (2205-1)\n#Created by Rajkumar Kuppuswami\n#Created on 05/01/2020\n#Program to perform processing the line by adding each word to the dictionary.\n#Once values are added need to format as list to sort the values by desc.\n\nimport collections\n\ndef main():\n dictionary = dict()\n#1st method to open the file\n# with open('Gettysburg.txt', 'r') as file_read:\n# Read the text file\n file_read = open('Gettysburg.txt', 'r')\n# Process word one by one\n for line in file_read:\n process_line(line, dictionary)\n print(\"Length of Dictionary is : {}\".format(len(dictionary.keys())))#Total Lenght of the dictionary\n\n print(pretty_print(dictionary))#Output of each word with count\n\n\ndef add_word(word, word_count):#Adding the words to the dictionary with count of words\n if word in word_count: #To validate the words to avoid duplicate\n word_count[word] = word_count[word]+1 #counting the number of words\n else:\n word_count[word] =1\n\ndef process_line (line, 
dictionary):\n    line = line.lower()  # convert to lower case\n    line = line.strip()  # remove the trailing newline and surrounding whitespace\n    words = line.split(\" \")  # split the line into words\n    for word in words:\n        add_word(word, dictionary)\n\n\ndef pretty_print(dictionary):\n    table = collections.defaultdict(list)\n    for a, b in dictionary.items():\n        table[b].append(a)\n    sort_table = sorted(table.items(), reverse=True)  # Sort list of tuples by count in descending order.\n    for item in sort_table:\n        count = item[0]\n        for word in item[1]:\n            print(word + \" \" * (30 - len(word)) + str(count))\n\nmain()\n\n\n", "repo_name": "dlingerfelt/DSC510Spring2020", "sub_path": "Kuppuswami_DSC510/Week8.1.py", "file_name": "Week8.1.py", "file_ext": "py", "file_size_in_byte": 1942, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "41", "api": [{"api_name": "collections.defaultdict", "line_number": 42, "usage_type": "call"}]} {"seq_id": "3150281386", "text": "from datetime import datetime\nfrom typing import Union, Any\nfrom flask_migrate import stamp, upgrade\nfrom sqlalchemy.exc import OperationalError, InvalidRequestError\nfrom sqlalchemy.orm import Query\nfrom mcserver.app import db\nfrom mcserver.app.models import ResourceType\nfrom mcserver.config import Config\nfrom mcserver.models_auto import Corpus, Exercise, UpdateInfo, LearningResult\n\n\nclass DatabaseService:\n    @staticmethod\n    def commit():\n        \"\"\"Commits the last action to the database and, if it fails, rolls back the current session.\"\"\"\n        try:\n            db.session.commit()\n        except (OperationalError, InvalidRequestError):\n            db.session.rollback()\n            raise\n\n    @staticmethod\n    def has_table(table: str) -> bool:\n        \"\"\"Checks if a table is present in the database or not.\"\"\"\n        return db.engine.dialect.has_table(db.engine, table)\n\n    @staticmethod\n    def init_db_alembic() -> None:\n        \"\"\"In Docker, the alembic version is not initially written to the database, so we need to set it manually.\"\"\"\n        if not DatabaseService.has_table(Config.DATABASE_TABLE_ALEMBIC):\n            stamp(directory=Config.MIGRATIONS_DIRECTORY)\n            upgrade(directory=Config.MIGRATIONS_DIRECTORY)\n\n    @staticmethod\n    def init_db_update_info() -> None:\n        \"\"\"Initializes update entries for all resources that have not yet been created.\"\"\"\n        if DatabaseService.has_table(Config.DATABASE_TABLE_UPDATEINFO):\n            for rt in ResourceType:\n                ui_cts: UpdateInfo = DatabaseService.query(\n                    UpdateInfo, filter_by=dict(resource_type=rt.name), first=True)\n                if ui_cts is None:\n                    ui_cts = UpdateInfo.from_dict(resource_type=rt.name, last_modified_time=1,\n                                                  created_time=datetime.utcnow().timestamp())\n                    db.session.add(ui_cts)\n            DatabaseService.commit()\n\n    @staticmethod\n    def query(table: Union[Corpus, Exercise, LearningResult, UpdateInfo], filter_by: dict = None,\n              first: bool = False) -> Any:\n        \"\"\"Executes a query on the database and rolls back the session if errors occur.\"\"\"\n        try:\n            ret_val: Query = db.session.query(table)\n            if filter_by:\n                ret_val = ret_val.filter_by(**filter_by)\n            ret_val = ret_val.first() if first else ret_val.all()\n            DatabaseService.commit()\n            return ret_val\n        except Exception:\n            db.session.rollback()\n            return None\n", "repo_name": "korpling/machina-callida", "sub_path": "mc_backend/mcserver/app/services/databaseService.py", "file_name": "databaseService.py", "file_ext": "py", "file_size_in_byte": 2562, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": 
"mcserver.app.db.session.commit", "line_number": 17, "usage_type": "call"}, {"api_name": "mcserver.app.db.session", "line_number": 17, "usage_type": "attribute"}, {"api_name": "mcserver.app.db", "line_number": 17, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.OperationalError", "line_number": 18, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.InvalidRequestError", "line_number": 18, "usage_type": "name"}, {"api_name": "mcserver.app.db.session.rollback", "line_number": 19, "usage_type": "call"}, {"api_name": "mcserver.app.db.session", "line_number": 19, "usage_type": "attribute"}, {"api_name": "mcserver.app.db", "line_number": 19, "usage_type": "name"}, {"api_name": "mcserver.app.db.engine.dialect.has_table", "line_number": 25, "usage_type": "call"}, {"api_name": "mcserver.app.db.engine", "line_number": 25, "usage_type": "attribute"}, {"api_name": "mcserver.app.db", "line_number": 25, "usage_type": "name"}, {"api_name": "mcserver.config.Config.DATABASE_TABLE_ALEMBIC", "line_number": 30, "usage_type": "attribute"}, {"api_name": "mcserver.config.Config", "line_number": 30, "usage_type": "name"}, {"api_name": "flask_migrate.stamp", "line_number": 31, "usage_type": "call"}, {"api_name": "mcserver.config.Config.MIGRATIONS_DIRECTORY", "line_number": 31, "usage_type": "attribute"}, {"api_name": "mcserver.config.Config", "line_number": 31, "usage_type": "name"}, {"api_name": "flask_migrate.upgrade", "line_number": 32, "usage_type": "call"}, {"api_name": "mcserver.config.Config.MIGRATIONS_DIRECTORY", "line_number": 32, "usage_type": "attribute"}, {"api_name": "mcserver.config.Config", "line_number": 32, "usage_type": "name"}, {"api_name": "mcserver.config.Config.DATABASE_TABLE_UPDATEINFO", "line_number": 37, "usage_type": "attribute"}, {"api_name": "mcserver.config.Config", "line_number": 37, "usage_type": "name"}, {"api_name": "mcserver.app.models.ResourceType", "line_number": 38, "usage_type": "name"}, {"api_name": "mcserver.models_auto.UpdateInfo", "line_number": 39, "usage_type": "name"}, {"api_name": "mcserver.models_auto.UpdateInfo", "line_number": 40, "usage_type": "argument"}, {"api_name": "mcserver.models_auto.UpdateInfo.from_dict", "line_number": 42, "usage_type": "call"}, {"api_name": "mcserver.models_auto.UpdateInfo", "line_number": 42, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "name"}, {"api_name": "mcserver.app.db.session.add", "line_number": 44, "usage_type": "call"}, {"api_name": "mcserver.app.db.session", "line_number": 44, "usage_type": "attribute"}, {"api_name": "mcserver.app.db", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 48, "usage_type": "name"}, {"api_name": "mcserver.models_auto.Corpus", "line_number": 48, "usage_type": "name"}, {"api_name": "mcserver.models_auto.Exercise", "line_number": 48, "usage_type": "name"}, {"api_name": "mcserver.models_auto.LearningResult", "line_number": 48, "usage_type": "name"}, {"api_name": "mcserver.models_auto.UpdateInfo", "line_number": 48, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Query", "line_number": 52, "usage_type": "name"}, {"api_name": "mcserver.app.db.session.query", "line_number": 52, "usage_type": "call"}, {"api_name": "mcserver.app.db.session", "line_number": 52, "usage_type": "attribute"}, {"api_name": "mcserver.app.db", "line_number": 52, "usage_type": "name"}, {"api_name": "mcserver.app.db.session.rollback", "line_number": 59, 
"usage_type": "call"}, {"api_name": "mcserver.app.db.session", "line_number": 59, "usage_type": "attribute"}, {"api_name": "mcserver.app.db", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 49, "usage_type": "name"}]} {"seq_id": "23389263038", "text": "import os\nimport json\nimport pandas as pd\nfrom helper import json_extract\n\nuber_eats = []\ndirectory = \"../data_output/ubereats/stores/\"\nfor filename in os.listdir(directory):\n if filename.endswith(\".json\"):\n f = os.path.join(directory, filename)\n fd = open(f, 'rb')\n file_json = json.load(fd)\n try:\n uuid = json_extract(file_json, 'uuid')[0]\n slug = json_extract(file_json, 'slug')[0]\n postcode = json_extract(file_json, 'postalCode')[0]\n postcode = str(postcode)\n latitude = json_extract(file_json, 'latitude')[0]\n longitude = json_extract(file_json, 'longitude')[0]\n data = [uuid, slug, postcode, latitude, longitude]\n uber_eats.append(data)\n except:\n print(filename)\n\ndf_uber_eats = pd.DataFrame(uber_eats, columns=['uuid', 'slug', 'zipcode', 'latitude', 'longitude'])\ndf_uber_eats.to_csv('../data_output/ubereats_stores.csv', index=False)\n\n\n", "repo_name": "chengjun-curb/sandbox", "sub_path": "location_attributes/data_prep/etl_ubereats.py", "file_name": "etl_ubereats.py", "file_ext": "py", "file_size_in_byte": 982, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.listdir", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "helper.json_extract", "line_number": 14, "usage_type": "call"}, {"api_name": "helper.json_extract", "line_number": 15, "usage_type": "call"}, {"api_name": "helper.json_extract", "line_number": 16, "usage_type": "call"}, {"api_name": "helper.json_extract", "line_number": 18, "usage_type": "call"}, {"api_name": "helper.json_extract", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "call"}]} {"seq_id": "11332465149", "text": "from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub\nfrom fairseq.models.text_to_speech.hub_interface import TTSHubInterface\n\n\ndef convert_text_to_speech(text):\n \"\"\"\n This function converts a given text to speech using the 'facebook/tts_transformer-fr-cv7_css10' model from Fairseq.\n The model is specialized in converting French text to speech.\n \n Args:\n text (str): The text to be converted to speech.\n \n Returns:\n wav (numpy array): The generated speech in the form of a wave file.\n rate (int): The sample rate of the generated speech.\n \"\"\"\n models, cfg, task = load_model_ensemble_and_task_from_hf_hub(\n 'facebook/tts_transformer-fr-cv7_css10',\n arg_overrides={'vocoder': 'hifigan', 'fp16': False}\n )\n model = models[0]\n TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg)\n generator = task.build_generator(model, cfg)\n sample = TTSHubInterface.get_model_input(task, text)\n wav, rate = TTSHubInterface.get_prediction(task, model, generator, sample)\n return wav, rate", "repo_name": "vixuowis/Research-2309", "sub_path": "Exp-2/output/hf-eval-data-v1/f00421_convert_text_to_speech.py", "file_name": "f00421_convert_text_to_speech.py", "file_ext": "py", "file_size_in_byte": 1075, "program_lang": "python", "lang": "en", "doc_type": "code", 
"stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "fairseq.checkpoint_utils.load_model_ensemble_and_task_from_hf_hub", "line_number": 17, "usage_type": "call"}, {"api_name": "fairseq.models.text_to_speech.hub_interface.TTSHubInterface.update_cfg_with_data_cfg", "line_number": 22, "usage_type": "call"}, {"api_name": "fairseq.models.text_to_speech.hub_interface.TTSHubInterface", "line_number": 22, "usage_type": "name"}, {"api_name": "fairseq.models.text_to_speech.hub_interface.TTSHubInterface.get_model_input", "line_number": 24, "usage_type": "call"}, {"api_name": "fairseq.models.text_to_speech.hub_interface.TTSHubInterface", "line_number": 24, "usage_type": "name"}, {"api_name": "fairseq.models.text_to_speech.hub_interface.TTSHubInterface.get_prediction", "line_number": 25, "usage_type": "call"}, {"api_name": "fairseq.models.text_to_speech.hub_interface.TTSHubInterface", "line_number": 25, "usage_type": "name"}]} {"seq_id": "26216402025", "text": "import sys\nimport unittest\nfrom appium import webdriver\nfrom appium.options.common.base import AppiumOptions\nfrom appium.webdriver.common.appiumby import AppiumBy\nfrom appium.options.common.app_option import AppOption\nfrom selenium.webdriver.common.keys import Keys\n\n\nclass ATSPIOptions(AppiumOptions, AppOption):\n pass\n\n\nclass TimelineTest(unittest.TestCase):\n\n def setUp(self):\n options = ATSPIOptions()\n options.app = tokodon_offline_path\n self.driver = webdriver.Remote(\n command_executor='http://127.0.0.1:4723',\n options=options)\n\n def tearDown(self):\n self.driver.get_screenshot_as_file(\"failed_test_shot_{}.png\".format(self.id()))\n self.driver.quit()\n\n def test_status_type(self):\n self.assertTrue(self.driver.find_element(by='description', value=\"Normal Status\"))\n self.assertTrue(self.driver.find_element(by='description', value=\"Spoiler Status\"))\n\n def test_favourite_interactions(self):\n favouriteButton=self.driver.find_element(by='description',value=\"Favourite\")\n favouriteButton.click()\n self.assertTrue(self.driver.find_element(by='description', value=\"Favourited\"))\n\n def test_bookmark_interactions(self):\n bookmarkButton=self.driver.find_element(by='description',value=\"Bookmark\")\n bookmarkButton.click()\n self.assertTrue(self.driver.find_element(by='description', value=\"Bookmarked\"))\n\n def test_boost_interactions(self):\n boostButton=self.driver.find_element(by='description',value=\"Boost\")\n boostButton.click()\n self.assertTrue(self.driver.find_element(by='description', value=\"Boosted\"))\n\n def test_status_media(self):\n searchElement = self.driver.find_element(by=AppiumBy.NAME, value=\"Home\")\n searchElement.send_keys(Keys.DOWN)\n searchElement.send_keys(Keys.DOWN)\n searchElement.send_keys(Keys.DOWN)\n self.assertTrue(self.driver.find_element(by='description', value=\"Status with image attachment\"))\n self.assertTrue(self.driver.find_element(by='description', value=\"Status with Video attachment\"))\n self.assertTrue(self.driver.find_element(by='description', value=\"Status with GifV attachment\"))\n\n\nif __name__ == '__main__':\n tokodon_offline_path = sys.argv[1]\n sys.argv.pop()\n unittest.main()", "repo_name": "KDE/tokodon", "sub_path": "src/autotests/appiumtests/TimelineTest.py", "file_name": "TimelineTest.py", "file_ext": "py", "file_size_in_byte": 2332, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 37, "dataset": "github-code", "pt": "41", "api": [{"api_name": "appium.options.common.base.AppiumOptions", "line_number": 10, 
"usage_type": "name"}, {"api_name": "appium.options.common.app_option.AppOption", "line_number": 10, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 14, "usage_type": "attribute"}, {"api_name": "appium.webdriver.Remote", "line_number": 19, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 19, "usage_type": "name"}, {"api_name": "appium.webdriver.common.appiumby.AppiumBy.NAME", "line_number": 47, "usage_type": "attribute"}, {"api_name": "appium.webdriver.common.appiumby.AppiumBy", "line_number": 47, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.DOWN", "line_number": 48, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 48, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.DOWN", "line_number": 49, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 49, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.DOWN", "line_number": 50, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 50, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sys.argv.pop", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 58, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 59, "usage_type": "call"}]} {"seq_id": "3008671809", "text": "\"\"\"\nFixtures for fanout\npurpose : one node to many nodes\nThe dataset has\n- 3 levels : L1, L2, L3\n- 2 resources: RI, RO\n- R in/out are connected\n\"\"\"\nimport pytest\nfrom os import path\n\nimport pandas as pd\n\n# module import\nfrom dependencynet.schema import SchemaBuilder\nfrom dependencynet.model import ModelBuilder\n\nfrom dependencynet.network.graphbuilder import LevelNode, InputNode, OutputNode\n\n\n@pytest.fixture\ndef schema_fanout():\n schema = SchemaBuilder().level('L1', 'L1') \\\n .level('L2', 'L2') \\\n .level('L3', 'L3') \\\n .resource('RI', 'RI', role='INPUT', connect_id_name='R') \\\n .resource('RO', 'RO', role='OUTPUT', connect_id_name='R') \\\n .connect('RO', 'RI') \\\n .render()\n return schema\n\n\n@pytest.fixture(scope=\"session\")\ndef compact_columns_fanout():\n columns = ['L1', 'L2', 'L3', 'RO', 'RI']\n return columns\n\n\n@pytest.fixture\ndef source_data_fanout(schema_fanout, compact_columns_fanout):\n filename = path.join('tests', 'resources', 'data', 'compact', 'fanout.csv')\n data = pd.read_csv(filename, delimiter=';')\n\n df = pd.DataFrame(data, columns=compact_columns_fanout)\n return df\n\n\n@pytest.fixture\ndef model_fanout(source_data_fanout, schema_fanout):\n model = ModelBuilder().from_compact(source_data_fanout) \\\n .with_schema(schema_fanout) \\\n .render()\n return model\n\n\n@pytest.fixture\ndef class_mapping_fanout():\n return {'L1': L1Node, 'L2': L2Node, 'L3': L3Node,\n 'RO': RONode, 'RI': RINode}\n\n\n# networkx classes\n\nclass L1Node(LevelNode):\n def __init__(self, properties):\n super().__init__(properties, 'L1')\n\n\nclass L2Node(LevelNode):\n def __init__(self, properties):\n super().__init__(properties, 'L2')\n\n\nclass L3Node(LevelNode):\n def __init__(self, properties):\n super().__init__(properties, 'L3')\n\n\nclass RINode(InputNode):\n def __init__(self, properties):\n super().__init__(properties, 'RI', 'R')\n\n\nclass RONode(OutputNode):\n def __init__(self, properties):\n super().__init__(properties, 'RO', 'R')\n", "repo_name": "cfalguiere/dependencynet", 
"sub_path": "tests/scenario/fanout/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 2217, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "dependencynet.schema.SchemaBuilder", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 44, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 39, "usage_type": "attribute"}, {"api_name": "dependencynet.model.ModelBuilder", "line_number": 50, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 56, "usage_type": "attribute"}, {"api_name": "dependencynet.network.graphbuilder.LevelNode", "line_number": 64, "usage_type": "name"}, {"api_name": "dependencynet.network.graphbuilder.LevelNode", "line_number": 69, "usage_type": "name"}, {"api_name": "dependencynet.network.graphbuilder.LevelNode", "line_number": 74, "usage_type": "name"}, {"api_name": "dependencynet.network.graphbuilder.InputNode", "line_number": 79, "usage_type": "name"}, {"api_name": "dependencynet.network.graphbuilder.OutputNode", "line_number": 84, "usage_type": "name"}]} {"seq_id": "6178054017", "text": "import io\nimport os.path\n\nimport stardicter.czechenglish\nfrom stardicter.test_base import BaseTest\n\n\nclass CzechEnglishTest(BaseTest):\n writer_class = stardicter.czechenglish.CzechEnglishWriter\n\n\nclass CzechEnglishFileTest(CzechEnglishTest):\n def get_writer(self):\n '''\n Gets prepared writer class.\n '''\n return self.writer_class(\n file=io.open(os.path.join(\n os.path.dirname(__file__),\n 'test_data.txt'\n ), 'rb')\n )\n", "repo_name": "nijel/stardicter", "sub_path": "stardicter/test_czechenglish.py", "file_name": "test_czechenglish.py", "file_ext": "py", "file_size_in_byte": 510, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 29, "dataset": "github-code", "pt": "41", "api": [{"api_name": "stardicter.test_base.BaseTest", "line_number": 8, "usage_type": "name"}, {"api_name": "stardicter.czechenglish.czechenglish", "line_number": 9, "usage_type": "attribute"}, {"api_name": "stardicter.czechenglish", "line_number": 9, "usage_type": "name"}, {"api_name": "io.open", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 19, "usage_type": "name"}]} {"seq_id": "2529970079", "text": "# fmtlib Conan package\n# Dmitriy Vetutnev, ODANT 2020\n\n\nfrom conans import ConanFile, CMake, tools\nimport os\n\n\nclass GoogletestConan(ConanFile):\n name = \"fmt\"\n version = \"10.1.1+0\"\n license = \"https://raw.githubusercontent.com/fmtlib/fmt/master/LICENSE.rst\"\n description = \"{fmt} is an open-source formatting library for C++. 
It can be used as a safe and fast alternative to (s)printf and iostreams.\"\n    url = \"https://github.com/odant/conan-fmt\"\n    settings = {\n        \"os\": [\"Windows\", \"Linux\"],\n        \"compiler\": [\"Visual Studio\", \"gcc\"],\n        \"build_type\": [\"Debug\", \"Release\"],\n        \"arch\": [\"x86\", \"x86_64\", \"mips\", \"armv7\"]\n    }\n    options = {\n        \"with_unit_tests\": [True, False],\n        \"ninja\": [True, False]\n    }\n    default_options = {\n        \"with_unit_tests\": False,\n        \"ninja\": True\n    }\n    generators = \"cmake\"\n    exports_sources = \"src/*\", \"CMakeLists.txt\"\n    no_copy_source = True\n    build_policy = \"missing\"\n\n    def build_requirements(self):\n        if self.options.ninja:\n            self.build_requires(\"ninja/[>=1.9.0]\")\n\n    def build(self):\n        cmake = CMake(self, msbuild_verbosity='normal')\n        cmake.verbose = True\n        cmake.definitions[\"FMT_INSTALL\"] = \"ON\"\n        cmake.definitions[\"FMT_DOC\"] = \"OFF\"\n        if self.options.with_unit_tests:\n            cmake.definitions[\"FMT_TEST\"] = \"ON\"\n        if self.settings.get_safe(\"compiler.runtime\") in (\"MT\", \"MTd\"):\n            cmake.definitions[\"MSVC_BUILD_STATIC\"] = \"ON\"\n        cmake.configure()\n        cmake.build()\n        if self.options.with_unit_tests:\n            if cmake.is_multi_configuration:\n                self.run(\"ctest --output-on-failure --build-config %s\" % self.settings.build_type)\n            else:\n                self.run(\"ctest --output-on-failure\")\n        cmake.install()\n        tools.rmdir(os.path.join(self.package_folder, \"lib/pkgconfig\"))\n\n    def package(self):\n        self.copy(\"*fmt.pdb\", dst=\"bin\", keep_path=False)\n\n    def package_id(self):\n        self.info.options.with_unit_tests = \"any\"\n        self.info.options.ninja = \"any\"\n\n    def package_info(self):\n        self.cpp_info.libs = tools.collect_libs(self)\n\n", "repo_name": "odant/conan-fmt", "sub_path": "conanfile.py", "file_name": "conanfile.py", "file_ext": "py", "file_size_in_byte": 2176, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "conans.ConanFile", "line_number": 9, "usage_type": "name"}, {"api_name": "conans.CMake", "line_number": 39, "usage_type": "call"}, {"api_name": "conans.tools.rmdir", "line_number": 55, "usage_type": "call"}, {"api_name": "conans.tools", "line_number": 55, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "conans.tools.collect_libs", "line_number": 65, "usage_type": "call"}, {"api_name": "conans.tools", "line_number": 65, "usage_type": "name"}]} {"seq_id": "36321491926", "text": "from pydeck import Deck, Layer, ViewState\n\nfeatures = {\n    \"type\": \"FeatureCollection\",\n    \"features\": [\n        {\n            \"type\": \"Feature\",\n            \"properties\": {},\n            \"geometry\": {\n                \"type\": \"Polygon\",\n                \"coordinates\": [\n                    [\n                        [-122.42923736572264, 37.80544394934271],\n                        [0, 37.80544394934271],\n                        [-122.42923736572264, 0],\n                        [-122.42923736572264, 37.80544394934271],\n                    ]\n                ],\n            },\n        }\n    ],\n}\n\n\ndef create_geojson_layer_with_gmaps_test_object():\n    return Deck(\n        description=\"Test of GeoJsonLayer, with Google Maps basemap\",\n        map_style=\"satellite\",\n        map_provider=\"google_maps\",\n        initial_view_state=ViewState(longitude=-122.45, latitude=37.8, zoom=0),\n        layers=[\n            Layer(\n                \"GeoJsonLayer\",\n                id=\"geojson-layer\",\n                data=features,\n                stroked=True,\n                filled=True,\n                line_width_min_pixels=2,\n                opacity=0.4,\n                get_line_color=[255, 100, 100],\n                get_fill_color=[200, 160, 0, 180],\n            )\n        ],\n        views=None,\n    )\n\n\nif __name__ == \"__main__\":\n    
create_geojson_layer_with_gmaps_test_object().to_html(\"test.html\", offline=True)\n", "repo_name": "visgl/deck.gl", "sub_path": "bindings/pydeck/tests/bindings/pydeck_examples/geojson_layer_with_gmaps.py", "file_name": "geojson_layer_with_gmaps.py", "file_ext": "py", "file_size_in_byte": 1414, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11339, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pydeck.Deck", "line_number": 26, "usage_type": "call"}, {"api_name": "pydeck.ViewState", "line_number": 30, "usage_type": "call"}, {"api_name": "pydeck.Layer", "line_number": 32, "usage_type": "call"}]} {"seq_id": "4324703105", "text": "import argparse\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import cross_val_predict, StratifiedKFold\nfrom sklearn.metrics import roc_auc_score, average_precision_score\n\nfrom constants import *\nfrom datasets import load_and_preprocess\nfrom utils.model_utils import save_model\nfrom utils.data_utils import get_roadmap_col_order\nimport models\n\nimport torch\nimport torch.nn.functional as F\nimport optuna\nfrom skorch import NeuralNetClassifier\nfrom skorch.callbacks import EpochScoring, LRScheduler\n\nMODEL_CHOICES = ['glm', 'standard', 'neighbors', 'e116_neigh']\n\n\ndef main(args):\n X, y = load_and_preprocess(args.project, args.model, split='train')\n\n if args.model == 'e116_neigh':\n def objective(trial):\n auc = EpochScoring(scoring='roc_auc', lower_is_better=False)\n apr = EpochScoring(scoring='average_precision', lower_is_better=False)\n lrs = LRScheduler(policy='StepLR', step_size=10, gamma=0.5)\n \n bs = trial.suggest_categorical('batch_size', [128])\n l2 = trial.suggest_uniform('l2', 5e-5, 1e-2)\n lr = trial.suggest_uniform('lr', 1e-4, 5e-3)\n epochs = trial.suggest_categorical('epochs', [30])\n n_filt = trial.suggest_categorical('n_filt', [8, 16, 32])\n width = trial.suggest_categorical('width', [3, 5, 7])\n lin_units = trial.suggest_categorical('lin_units', [100, 200, 400])\n\n net = NeuralNetClassifier(\n models.MpraCNN,\n\n optimizer=torch.optim.Adam,\n optimizer__weight_decay=l2,\n lr=lr,\n batch_size=bs,\n max_epochs=epochs,\n\n module__n_filt=n_filt,\n module__width=width,\n module__lin_units=lin_units,\n\n callbacks=[auc, apr],\n iterator_train__shuffle=True,\n train_split=None,\n verbose=0\n )\n \n kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=1000)\n np.random.seed(1000)\n torch.manual_seed(1000)\n cv_scores = cross_val_predict(net, X, y, cv=kf,\n method='predict_proba', n_jobs=-1)\n return roc_auc_score(y, cv_scores[:, 1])\n \n elif args.model == 'neighbors':\n def objective(trial):\n auc = EpochScoring(scoring='roc_auc', lower_is_better=False)\n apr = EpochScoring(scoring='average_precision', lower_is_better=False)\n lrs = LRScheduler(policy='StepLR', step_size=10, gamma=0.5)\n \n bs = trial.suggest_categorical('batch_size', [256])\n l2 = trial.suggest_uniform('l2', 5e-5, 5e-4)\n lr = trial.suggest_uniform('lr', 5e-5, 5e-4)\n epochs = trial.suggest_categorical('epochs', [30, 40])\n n_filt = trial.suggest_categorical('n_filt', [8, 16, 32])\n width = trial.suggest_categorical('width', [5])\n n_lin1 = trial.suggest_categorical('n_lin1', [400, 600])\n n_lin2 = trial.suggest_categorical('n_lin2', [400])\n\n net = NeuralNetClassifier(\n models.MpraFullCNN,\n\n optimizer=torch.optim.Adam,\n optimizer__weight_decay=l2,\n lr=lr,\n batch_size=bs,\n max_epochs=epochs,\n\n module__n_filt=n_filt,\n module__width=width,\n module__n_lin1=n_lin1,\n module__n_lin2=n_lin2,\n 
module__nonlin=F.leaky_relu,\n\n callbacks=[auc, apr],\n iterator_train__shuffle=True,\n train_split=None,\n verbose=0\n )\n \n kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=1000)\n np.random.seed(1000)\n torch.manual_seed(1000)\n cv_scores = cross_val_predict(net, X, y, cv=kf,\n method='predict_proba', n_jobs=-1)\n return roc_auc_score(y, cv_scores[:, 1])\n print('Starting trials')\n study = optuna.create_study(direction='maximize')\n study.optimize(objective, n_trials=args.iter)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--project', '-p', choices=PROJ_CHOICES, default='mpra_e116')\n parser.add_argument('--model', '-m', default='standard', choices=MODEL_CHOICES,\n help='Which data/model to train on')\n parser.add_argument('--iter', '-i', type=int,\n help='Number of search iterations')\n args = parser.parse_args()\n\n main(args)\n", "repo_name": "fl16180/MpraScreen", "sub_path": "search_hparam.py", "file_name": "search_hparam.py", "file_ext": "py", "file_size_in_byte": 4679, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "datasets.load_and_preprocess", "line_number": 24, "usage_type": "call"}, {"api_name": "skorch.callbacks.EpochScoring", "line_number": 28, "usage_type": "call"}, {"api_name": "skorch.callbacks.EpochScoring", "line_number": 29, "usage_type": "call"}, {"api_name": "skorch.callbacks.LRScheduler", "line_number": 30, "usage_type": "call"}, {"api_name": "skorch.NeuralNetClassifier", "line_number": 40, "usage_type": "call"}, {"api_name": "models.MpraCNN", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_predict", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 64, "usage_type": "call"}, {"api_name": "skorch.callbacks.EpochScoring", "line_number": 68, "usage_type": "call"}, {"api_name": "skorch.callbacks.EpochScoring", "line_number": 69, "usage_type": "call"}, {"api_name": "skorch.callbacks.LRScheduler", "line_number": 70, "usage_type": "call"}, {"api_name": "skorch.NeuralNetClassifier", "line_number": 81, "usage_type": "call"}, {"api_name": "models.MpraFullCNN", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.leaky_relu", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 94, "usage_type": "name"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_predict", "line_number": 105, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 107, "usage_type": "call"}, {"api_name": "optuna.create_study", "line_number": 109, "usage_type": "call"}, 
{"api_name": "argparse.ArgumentParser", "line_number": 114, "usage_type": "call"}]} {"seq_id": "31612205772", "text": "from django.urls import path\nfrom . import views\nfrom re import template\nfrom django.urls import path \nfrom . import views\nfrom django.contrib.auth import views as auth_view\n\nurlpatterns = [\n path('', views.home, name='home1'),\n path('home/', views.homepage, name='home'),\n path('table/', views.students, name='data'),\n path('addrecord/', views.addrecord, name='add'),\n path('activity/', views.activity, name='activity'),\n path('update//', views.updaterecord, name='update'),\n path('delete//', views.deleterecord, name='delete'),\n path('', views.home, name='home'),\n path('register/', views.register, name='register'),\n path('profile/', views.profile, name='profile'),\n path('login/', views.login, name='login'),\n path('logout/', views.logout, name='logout'),\n\n\n]", "repo_name": "Mariam22-hub/student-affairs-website", "sub_path": "updateTable/Table/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 819, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}]} {"seq_id": "71790060284", "text": "import trino\nconn = trino.dbapi.connect(\n host='localhost',\n port=9091,\n user='eliar'\n)\n\n# Execute a cross-database query\ncur = conn.cursor()\ncur.execute(\"\"\"\n SELECT * FROM postgresql1.dwh.machinedim\n UNION ALL\n SELECT * FROM postgresql2.dwh.machinedim\n \"\"\")\nrows = cur.fetchall()\nfor row in rows:\n print(row)", "repo_name": "Efejann0/Trino", "sub_path": "trino-python-code/trino-engine.py", "file_name": "trino-engine.py", "file_ext": "py", "file_size_in_byte": 345, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "trino.dbapi.connect", "line_number": 2, "usage_type": "call"}, {"api_name": "trino.dbapi", "line_number": 2, "usage_type": "attribute"}]} {"seq_id": "75243311804", "text": "import parmed\nfrom io import StringIO\n\nparams_sam = parmed.amber.AmberParameterSet.from_leaprc(StringIO(u'SAM = loadMol2 parameters/SAM.mol2\\nloadAmberParams parameters/frcmod.SAM'))\nparams_sam = parmed.openmm.OpenMMParameterSet.from_parameterset(params_sam)\nparams_sam.write('parameters/SAM.xml')\n\nparams_sah = parmed.amber.AmberParameterSet.from_leaprc(StringIO(u'SAH = loadMol2 parameters/SAH.mol2\\nloadAmberParams parameters/frcmod.SAH'))\nparams_sah = parmed.openmm.OpenMMParameterSet.from_parameterset(params_sah)\nparams_sah.write('parameters/SAH.xml')\n\n# convert GAFF here for reproducibility etc. 
rather than taking it converted from my OpenMM conversion which has not been merged yet - gaff.dat was copied into files/ from AmberTools16\n# gotta set write_unused to True\nparams_gaff = parmed.amber.AmberParameterSet.from_leaprc(StringIO(u'loadAmberParams gaff.dat'))\nparams_gaff = parmed.openmm.OpenMMParameterSet.from_parameterset(params_gaff)\nparams_gaff.write('parameters/gaff.xml', write_unused=True)\n", "repo_name": "choderalab/pimento", "sub_path": "SETD8/Catalytic_Cycle_p11708_scripted/after_antechamber.py", "file_name": "after_antechamber.py", "file_ext": "py", "file_size_in_byte": 1011, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "parmed.amber.AmberParameterSet.from_leaprc", "line_number": 4, "usage_type": "call"}, {"api_name": "parmed.amber", "line_number": 4, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 4, "usage_type": "call"}, {"api_name": "parmed.openmm.OpenMMParameterSet.from_parameterset", "line_number": 5, "usage_type": "call"}, {"api_name": "parmed.openmm", "line_number": 5, "usage_type": "attribute"}, {"api_name": "parmed.amber.AmberParameterSet.from_leaprc", "line_number": 8, "usage_type": "call"}, {"api_name": "parmed.amber", "line_number": 8, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 8, "usage_type": "call"}, {"api_name": "parmed.openmm.OpenMMParameterSet.from_parameterset", "line_number": 9, "usage_type": "call"}, {"api_name": "parmed.openmm", "line_number": 9, "usage_type": "attribute"}, {"api_name": "parmed.amber.AmberParameterSet.from_leaprc", "line_number": 14, "usage_type": "call"}, {"api_name": "parmed.amber", "line_number": 14, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 14, "usage_type": "call"}, {"api_name": "parmed.openmm.OpenMMParameterSet.from_parameterset", "line_number": 15, "usage_type": "call"}, {"api_name": "parmed.openmm", "line_number": 15, "usage_type": "attribute"}]} {"seq_id": "1551033529", "text": "import regex as re # regex string finding/replacing\nimport urllib.parse # convert link characters like %\n\n\n# -- [3] Convert Obsidian type img links to proper md image links\n# Further conversion will be done in the block below\ndef obs_img_to_md_img(pb, page):\n for matched_link in re.findall(\"(?<=\\!\\[\\[)(.*?)(?=\\]\\])\", page):\n link = \"\"\n if \"|\" in matched_link:\n parts = matched_link.split(\"|\")\n link = parts.pop(0)\n alias = \"|\".join(parts)\n new_link = f\"![{alias}](\" + urllib.parse.quote(link) + \")\"\n else:\n new_link = \"![](\" + urllib.parse.quote(matched_link) + \")\"\n\n # Obsidian page inclusions use the same tag...\n # Skip if we don't match image suffixes. 
Inclusions are handled at the end.\n        link = matched_link.split(\"|\")[0]\n        if len(link.split(\".\")) == 1 or link.split(\".\")[-1].lower() not in pb.gc(\"included_file_suffixes\", cached=True):\n            new_link = f''\n\n        safe_link = re.escape(\"![[\" + matched_link + \"]]\")\n        page = re.sub(safe_link, new_link, page)\n\n    return page\n", "repo_name": "tinsirius/obsidian-html", "sub_path": "obsidianhtml/note2md/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1132, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "41", "api": [{"api_name": "regex.findall", "line_number": 8, "usage_type": "call"}, {"api_name": "urllib.parse.parse.quote", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 14, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 14, "usage_type": "name"}, {"api_name": "urllib.parse.parse.quote", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 16, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 16, "usage_type": "name"}, {"api_name": "regex.escape", "line_number": 24, "usage_type": "call"}, {"api_name": "regex.sub", "line_number": 25, "usage_type": "call"}]} {"seq_id": "36778872629", "text": "import matplotlib.pyplot as plt\n\nimport networkx as nx\nimport matplotlib.pyplot\n\ndef bug_fix_permutate(testedges, graph, individual_edge_thickness):\n    #print testedges\n    for i in range(0,len(graph)):\n        if testedges[i]!=graph[i]:\n            try:\n                indbadges=testedges.index(graph[i])\n            except Exception as ex:\n                indbadges=testedges.index((graph[i][1],graph[i][0]))\n\n            testedges[i],testedges[indbadges]=testedges[indbadges],testedges[i]\n            individual_edge_thickness[i],individual_edge_thickness[indbadges]=individual_edge_thickness[indbadges],individual_edge_thickness[i]\n\n    #print individual_edge_thickness\n    return individual_edge_thickness\n\n\ndef draw_graph(graph,individual_edge_thickness, labels=None, graph_layout='shell',\n               node_size=1600, node_color='blue', node_alpha=0.3,\n               node_text_size=12,\n               edge_color='blue', edge_alpha=0.3, edge_tickness=1,\n               edge_text_pos=0.3,\n               text_font='sans-serif'):\n\n    G=nx.Graph()\n    G.add_edges_from(graph)\n\n    # Fix the edge-ordering mismatch between G.edges() and the input graph\n    testedges=list(G.edges())\n    individual_edge_thickness=bug_fix_permutate(testedges,graph,individual_edge_thickness)\n\n    if graph_layout == 'spring':\n        graph_pos=nx.spring_layout(G)\n    elif graph_layout == 'spectral':\n        graph_pos=nx.spectral_layout(G)\n    elif graph_layout == 'random':\n        graph_pos=nx.random_layout(G)\n    else:\n        graph_pos=nx.shell_layout(G)\n\n    nx.draw_networkx_nodes(G,graph_pos,node_size=node_size,\n                           alpha=node_alpha, node_color=node_color)\n    nx.draw_networkx_edges(G,graph_pos,width=individual_edge_thickness,\n                           alpha=edge_alpha,edge_color=edge_color)\n    nx.draw_networkx_labels(G, graph_pos,font_size=node_text_size,\n                            font_family=text_font)\n\n    if labels is None:\n        labels = [\"\" for element in range(len(graph))]\n\n    edge_labels = dict(zip(graph, labels))\n    nx.draw_networkx_edge_labels(G, graph_pos, edge_labels=edge_labels,\n                                 label_pos=edge_text_pos)\n\n    plt.show()", "repo_name": "lucaderi/sgr", "sub_path": "2011-2020/2017/Maraz/progetto/grapher.py", "file_name": "grapher.py", "file_ext": "py", "file_size_in_byte": 2142, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "41", "api": [{"api_name": "networkx.Graph", "line_number": 29, "usage_type": "call"}, {"api_name": "networkx.spring_layout", 
"line_number": 37, "usage_type": "call"}, {"api_name": "networkx.spectral_layout", "line_number": 39, "usage_type": "call"}, {"api_name": "networkx.random_layout", "line_number": 41, "usage_type": "call"}, {"api_name": "networkx.shell_layout", "line_number": 43, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_nodes", "line_number": 45, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 47, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_labels", "line_number": 49, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_edge_labels", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}]} {"seq_id": "31389944524", "text": "import discord\nimport aiohttp\nimport io\nfrom akito import Embed\nfrom discord.ext import commands\n\n\nclass CommentCmd(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n # gae\n @commands.command(\n name = \"comment\",\n aliases=[\"ytcomment\"],\n description = \"Fake YouTube Comment\",\n usage = \"[Member] [Comment]\",\n help = \"You must **tag** `Member` and then write the `Comment`\"\n )\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def Comment(self, ctx, user: discord.Member = None, *, comment):\n if not user:\n user = ctx.author\n\n url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={user.display_avatar}&username={user.display_name}&comment={comment}\"\n\n async with aiohttp.ClientSession() as session:\n response = await session.get(url)\n imageData = io.BytesIO(await response.read())\n file = discord.File(imageData, filename=\"comment.png\")\n\n embed = discord.Embed(colour=ctx.author.color)\n embed.set_image(url=\"attachment://comment.png\")\n embed.set_footer(\n text=f\"Requested By {ctx.author}\", icon_url=ctx.author.display_avatar\n )\n\n await ctx.respond(embed=embed, file=file)\n\n @Comment.error\n async def comment_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n embed = await Embed.missingrequiredargument(self, ctx)\n await ctx.respond(embed = embed, delete_after= 60)\n\n elif isinstance(error, commands.MemberNotFound):\n embed = await Embed.membernotfound(self, ctx)\n await ctx.respond(embed = embed, delete_after= 60)\n\n else:\n pass\n\n\ndef setup(bot):\n bot.add_cog(CommentCmd(bot))\n", "repo_name": "eitozx/AkitoBot", "sub_path": "extension/image/comment.py", "file_name": "comment.py", "file_ext": "py", "file_size_in_byte": 1818, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 34, "dataset": "github-code", "pt": "41", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 8, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 8, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 21, "usage_type": "attribute"}, {"api_name": "aiohttp.ClientSession", "line_number": 27, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 29, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "discord.ext.commands.cooldown", "line_number": 20, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 
20, "usage_type": "name"}, {"api_name": "discord.ext.commands.BucketType", "line_number": 20, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.MissingRequiredArgument", "line_number": 42, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 42, "usage_type": "name"}, {"api_name": "akito.Embed.missingrequiredargument", "line_number": 43, "usage_type": "call"}, {"api_name": "akito.Embed", "line_number": 43, "usage_type": "name"}, {"api_name": "discord.ext.commands.MemberNotFound", "line_number": 46, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 46, "usage_type": "name"}, {"api_name": "akito.Embed.membernotfound", "line_number": 47, "usage_type": "call"}, {"api_name": "akito.Embed", "line_number": 47, "usage_type": "name"}]} {"seq_id": "40035044910", "text": "from bson import json_util\nfrom flask import make_response\nfrom flask.ext.restful import Api\nfrom flask.ext.cuddlyrest.views import ListMongoResource, SingleMongoResource\n\n\nclass CuddlyRest(Api):\n \n def __init__(self, **kwargs):\n Api.__init__(self, **kwargs)\n \n def init_app(self, app):\n self.app = app\n self.representation('application/json')(self.json_encode)\n\n def json_encode(self, data, code, headers=None):\n resp = make_response(json_util.dumps(data, indent=4), code)\n if headers:\n resp.headers.extend(headers)\n return resp\n\n def register(self, collection, name):\n collection_resource = SingleMongoResource(collection)\n collection_list = ListMongoResource(collection)\n self.add_resource(collection_resource, '/%s/'\n % name,\n endpoint=name + '_single',\n document=collection)\n self.add_resource(collection_list, '/%s' % name,\n endpoint=name + '_multiple',\n document=collection)\n\n def run(self, *args, **kwargs):\n self.app.run(*args, **kwargs)\n\n def add_resource(self, resource, *urls, **kwargs):\n \"\"\"Adds a resource to the api.\n\n :param resource: the class name of your resource\n :type resource: :class:`Resource`\n :param urls: one or more url routes to match for the resource, standard\n flask routing rules apply. 
Any url variables will be\n passed to the resource method as args.\n :type urls: str\n\n :param endpoint: endpoint name (defaults to\n :meth:`Resource.__name__.lower`\n Can be used to reference this route in :class:`fields.Url` fields\n :type endpoint: str\n\n Additional keyword arguments not specified above will be passed as-is\n to :meth:`flask.Flask.add_url_rule`.\n\n Examples::\n\n api.add_resource(HelloWorld, '/', '/hello')\n api.add_resource(Foo, '/foo', endpoint=\"foo\")\n api.add_resource(FooSpecial, '/special/foo', endpoint=\"foo\")\n\n \"\"\"\n endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()\n self.endpoints.add(endpoint)\n\n if endpoint in self.app.view_functions.keys():\n previous_view_class = (self.app.view_functions[endpoint]\n .__dict__['view_class'])\n\n # if you override the endpoint with a different class, avoid the\n # collision by raising an exception\n if previous_view_class != resource:\n raise ValueError(\n 'This endpoint (%s) is already set to the class %s.'\n % (endpoint, previous_view_class.__name__))\n\n resource.endpoint = endpoint\n resource_func = self.output(resource.as_view(endpoint, **kwargs))\n\n for decorator in self.decorators:\n resource_func = decorator(resource_func)\n\n for url in urls:\n self.app.add_url_rule(self.prefix + url, view_func=resource_func)\n", "repo_name": "wuurrd/Flask-CuddlyRest", "sub_path": "flask_cuddlyrest/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 3138, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "41", "api": [{"api_name": "flask.ext.restful.Api", "line_number": 7, "usage_type": "name"}, {"api_name": "flask.ext.restful.Api.__init__", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.ext.restful.Api", "line_number": 10, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 17, "usage_type": "call"}, {"api_name": "bson.json_util.dumps", "line_number": 17, "usage_type": "call"}, {"api_name": "bson.json_util", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.ext.cuddlyrest.views.SingleMongoResource", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.ext.cuddlyrest.views.ListMongoResource", "line_number": 24, "usage_type": "call"}]} {"seq_id": "39751505548", "text": "import websocket, json, requests, time, threading\r\nimport pandas as pd\r\n\r\n# ----------------------------------------------------- MAIN -----------------------------------------------------\r\n\r\ndef main():\r\n # INITIALIZE DATA\r\n initialize()\r\n\r\n # WEBSOCKET THREAD\r\n thread1 = threading.Thread(target=websocket_dataflow)\r\n\r\n # POSITION THREAD\r\n thread2 = threading.Thread(target=position_payload)\r\n\r\n # STARTS THREADS\r\n thread1.start()\r\n thread2.start()\r\n\r\n# ----------------------------------------------------- GLOBAL -----------------------------------------------------\r\n\r\n# DATA\r\nstock = []\r\nstock_filtered = []\r\nstock_ordered = {}\r\nstock_data = {}\r\norders = {}\r\n\r\n# KEYS\r\nAPI_KEY = 'PKNWDZT640Q786J4Q3ZP'\r\nAPI_SECRET_KEY = 'CnHr41rc62hIzxh27bqrSBLnq1kZa3yENBg1BKp4'\r\nSUB = \"sip\"\r\n\r\n# TRADING URLS\r\nBASE_TRADE_URL = \"https://paper-api.alpaca.markets\"\r\nACCOUNT_URL = \"{}/v2/account\".format(BASE_TRADE_URL)\r\nORDERS_URL = \"{}/v2/orders\".format(BASE_TRADE_URL)\r\nPOSITIONS_URL = \"{}/v2/positions\".format(BASE_TRADE_URL)\r\nHEADERS = {'APCA-API-KEY-ID': API_KEY, 'APCA-API-SECRET-KEY': API_SECRET_KEY}\r\n\r\n# DATA URLS\r\nBASE_DATA_URL 
= \"https://data.alpaca.markets/v2/stocks/snapshots?symbols=\"\r\nCLOCK_URL = \"https://api.alpaca.markets/v2/clock\"\r\n\r\n# ----------------------------------------------------- INITIALIZE -----------------------------------------------------\r\n\r\ndef initialize():\r\n # STOCK LIST SETUP (S&P 500 LIST)\r\n sp_500_ticker = pd.read_html(\"https://en.wikipedia.org/wiki/List_of_S%26P_500_companies\")\r\n sp_500_ticker = sp_500_ticker[0]\r\n stock = sp_500_ticker['Symbol'].values.tolist()\r\n symbol_string = \"\"\r\n for symbol in stock:\r\n symbol_string = symbol_string + symbol + \",\"\r\n symbol_string = symbol_string[0:len(symbol_string)-1]\r\n DATA_URL = BASE_DATA_URL + symbol_string\r\n\r\n # DATA REQUEST\r\n request = requests.get(DATA_URL, headers=HEADERS)\r\n data = request.json()\r\n\r\n # BOUNDS + DICT SETUP\r\n for key in data:\r\n if (data[key] != None): \r\n stock_data[key] = {}\r\n stock_data[key][\"current_price\"] = \"\"\r\n delta = (data[key][\"prevDailyBar\"][\"h\"] - data[key][\"prevDailyBar\"][\"l\"]) / 2\r\n stock_data[key][\"current_bar\"] = [data[key][\"dailyBar\"][\"l\"], data[key][\"dailyBar\"][\"h\"]]\r\n stock_data[key][\"bounds\"] = [data[key][\"prevDailyBar\"][\"l\"] - delta, data[key][\"prevDailyBar\"][\"h\"] + delta]\r\n if (stock_data[key][\"current_bar\"][0] < stock_data[key][\"bounds\"][0] or stock_data[key][\"current_bar\"][1] > stock_data[key][\"bounds\"][1]):\r\n del stock_data[key]\r\n else:\r\n stock_filtered.append(key)\r\n\r\n# ----------------------------------------------------- WEBSOCKET -----------------------------------------------------\r\n\r\ndef websocket_dataflow():\r\n\r\n # SOCKET LINK\r\n socket = \"wss://stream.data.alpaca.markets/v2/\" + SUB\r\n\r\n # SUBSCRIBE\r\n def on_open(ws):\r\n print(\"\\nConnection sucess...\", \"\\n\")\r\n # AUTH\r\n message = {\"action\": \"auth\", \"key\": API_KEY, \"secret\": API_SECRET_KEY}\r\n ws.send(json.dumps(message))\r\n # MESSAGE TO SEND\r\n message = {\"action\":\"subscribe\", \"bars\":stock_filtered}\r\n ws.send(json.dumps(message))\r\n\r\n # PARSE DATA\r\n def on_message(ws, message):\r\n recieved = json.loads(message)\r\n for x in recieved:\r\n if (x[\"T\"] == \"b\"):\r\n stock_data[x[\"S\"]][\"current_price\"] = x[\"c\"]\r\n #print(x[\"S\"], \" updated: \", stock_data[x[\"S\"]], \"\\n\")\r\n \r\n # ERROR\r\n def on_error(ws, message):\r\n print(\"Error: \" + message, \"\\n\")\r\n\r\n def on_close():\r\n print(\"Connection terminated...\", \"\\n\")\r\n\r\n # CONNECT TO SERVER\r\n ws = websocket.WebSocketApp(socket, on_open=on_open, on_message=on_message, on_error=on_error, on_close=on_close)\r\n ws.run_forever()\r\n\r\n# ----------------------------------------------------- POSITION PAYLOAD -----------------------------------------------------\r\n\r\ndef position_payload():\r\n open_trades = 0\r\n while True:\r\n # RUNS EVERY 60 SEC\r\n time.sleep(60)\r\n\r\n # BUYS STOCK (SWAPPED THE IF STATEMENT )\r\n for stock in stock_data:\r\n if (stock_data[stock][\"current_price\"] != \"\" and not(stock in stock_ordered) and open_trades < 10):\r\n if (stock_data[stock][\"current_price\"] < stock_data[stock][\"bounds\"][0] and open_trades < 10):\r\n short_order(stock)\r\n open_trades += 1\r\n print(open_trades)\r\n elif (stock_data[stock][\"current_price\"] > stock_data[stock][\"bounds\"][1] and open_trades < 10):\r\n buy_order(stock)\r\n open_trades += 1\r\n print(open_trades)\r\n\r\n # UPDATES ORDERS\r\n update_orders()\r\n\r\n # SELLS STOCK\r\n for order in list(orders):\r\n if 
((orders[order][\"status\"] == \"new\" or orders[order][\"status\"] == \"filled\" or orders[order][\"status\"] == \"accepted\")):\r\n print(\"order: \", order)\r\n if (time_to_sell(orders[order][\"filled_at\"])):\r\n print(\"completed time_to_sell\")\r\n sell_position(orders[order][\"symbol\"])\r\n del orders[order]\r\n open_trades -= 1\r\n print(open_trades)\r\n \r\n# COMPARE TIME\r\n\r\ndef time_to_sell(order_time):\r\n print(\"time_to_sell\")\r\n request = requests.get(CLOCK_URL, headers=HEADERS)\r\n data = request.json()\r\n current_time = data['timestamp']\r\n order_time = order_time[11 : 16]\r\n current_time = current_time[11 : 16]\r\n print(current_time)\r\n print(order_time)\r\n difference = (int((int(current_time[0:2]) - int(order_time[0:2])) * 60) + int((int(current_time[3:5]) - int(order_time[3:5])))) + 240\r\n print(difference)\r\n return difference >= 30\r\n\r\n# POSITIONS\r\n\r\ndef sell_position(symbol):\r\n url = POSITIONS_URL + \"/\" + symbol \r\n r = requests.delete(url, headers=HEADERS)\r\n print(\"Position Sold: \", json.loads(r.content), \"\\n\")\r\n\r\ndef sell_all_position():\r\n url = POSITIONS_URL + \"?cancel_orders=true\"\r\n r = requests.delete(url, headers=HEADERS)\r\n print(\"All Position Sold: \", json.loads(r.content), \"\\n\")\r\n\r\n# ORDERS\r\ndef buy_order(symbol):\r\n data = {\r\n \"symbol\": symbol,\r\n \"notional\": \"10000.00\",\r\n \"side\": \"buy\",\r\n \"type\": \"market\",\r\n \"time_in_force\": \"day\"\r\n }\r\n order(data)\r\n\r\n\r\ndef short_order(symbol):\r\n qty = int(10000/stock_data[symbol][\"current_price\"])\r\n data = {\r\n \"symbol\": symbol,\r\n \"qty\": qty,\r\n \"side\": \"sell\",\r\n \"type\": \"market\",\r\n \"time_in_force\": \"day\"\r\n }\r\n order(data)\r\n\r\ndef order(data):\r\n stock_ordered[data[\"symbol\"]] = True\r\n r = requests.post(ORDERS_URL, json=data, headers=HEADERS)\r\n print(\"Order Bought: \", json.loads(r.content), \"\\n\")\r\n\r\ndef cancel_orders():\r\n r = requests.delete(ORDERS_URL, headers=HEADERS)\r\n print(\"Order Canceled: \", json.loads(r.content), \"\\n\")\r\n\r\ndef update_orders():\r\n r = requests.get(ORDERS_URL, headers=HEADERS)\r\n data = json.loads(r.content)\r\n for order in data:\r\n orders[order['client_order_id']] = {\"symbol\":order['symbol'], \"status\": order['status'], \"filled_at\":order['created_at']}\r\n print(\"Updated Orders: \", orders, \"\\n\")\r\n\r\n# ----------------------------------------------------- RUN -----------------------------------------------------\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n# ---------------------------------------------------------------------------------------------------------------", "repo_name": "JSidle/StockBot", "sub_path": "StockBot.py", "file_name": "StockBot.py", "file_ext": "py", "file_size_in_byte": 7694, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "threading.Thread", "line_number": 11, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 59, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 87, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 90, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 94, "usage_type": "call"}, {"api_name": "websocket.WebSocketApp", "line_number": 108, "usage_type": "call"}, {"api_name": "time.sleep", 
"line_number": 117, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 149, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 164, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 165, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 169, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 170, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 197, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 198, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 201, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 202, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 205, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 206, "usage_type": "call"}]} {"seq_id": "34364317779", "text": "import torch\nimport numpy as np\nimport pandas as pd\nimport os\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import f1_score, confusion_matrix, accuracy_score\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch.utils.data.sampler import Sampler\n\nfrom folktables import ACSDataSource, ACSIncome, ACSEmployment\nfrom fairbatch_local import FairBatch\n\ndef load_celeba_partition(img_list, celeba_feat_dir, ydict, groupdict):\n x, y, group = [], [], []\n for img in img_list:\n feat_path = os.path.join(celeba_feat_dir, img[:-3] + 'npy')\n if not os.path.exists(feat_path):\n continue\n\n feat = np.load(feat_path)\n x.append(feat)\n y.append(ydict[img])\n group.append(groupdict[img])\n\n return np.array(x), np.array(y), np.array(group)\n\ndef load_celeba_dataset():\n celeba_dir = '/mnt/LargeDisk/Data/celeba'\n celeba_label_file = os.path.join(celeba_dir, 'list_attr_celeba.csv')\n celeba_partition_file = os.path.join(celeba_dir, 'list_eval_partition.csv')\n celeba_feat_dir = os.path.join(celeba_dir, 'feat_align_celeba')\n\n dflabel = pd.read_csv(celeba_label_file)\n ydict = {img_id: smiling_label==1 for img_id, smiling_label in zip(dflabel['image_id'], dflabel['Smiling'])}\n groupdict = {img_id: 1-max(male_label, 0) for img_id, male_label in zip(dflabel['image_id'], dflabel['Male'])}\n\n dfpart = pd.read_csv(celeba_partition_file)\n img_list = dfpart['image_id']\n partition = dfpart['partition']\n train_img = img_list[partition==0]\n valid_img = img_list[partition==1]\n test_img = img_list[partition==2]\n\n x_train, y_train, group_train = load_celeba_partition(train_img, celeba_feat_dir, ydict, groupdict)\n x_valid, y_valid, group_valid = load_celeba_partition(valid_img, celeba_feat_dir, ydict, groupdict)\n x_test, y_test, group_test = load_celeba_partition(test_img, celeba_feat_dir, ydict, groupdict)\n\n return x_train, y_train, group_train, x_test, y_test, group_test, x_valid, y_valid, group_valid\n\ndef get_dataset(dataset='acsincome', protected_class='sex',\n shuffle_seed=0, batch_size=128, train_shuffle=True,\n fairbatch=False, model=None):\n\n if 'acs' in dataset:\n data_source = ACSDataSource(survey_year='2018', horizon='1-Year', survey='person')\n acs_data = data_source.get_data(states=['CA'], download=True)\n\n if dataset=='acsincome':\n task_class = ACSIncome\n elif dataset=='acsemployment':\n task_class = ACSEmployment\n\n if protected_class=='sex':\n task_class._group = 'SEX'\n features, label, group = task_class.df_to_numpy(acs_data)\n group = group - 1\n elif protected_class=='race':\n 
task_class._group = 'RAC1P'\n features, label, group = task_class.df_to_numpy(acs_data)\n group[group>1] = 2 # White vs Others\n group = group - 1\n\n x_train, x_test, y_train, y_test, group_train, group_test = train_test_split(features, label, group, test_size=0.2, random_state=0) # Test Split 20%\n x_train, x_valid, y_train, y_valid, group_train, group_valid = train_test_split(x_train, y_train, group_train, test_size=0.1/0.8, random_state=0) # Val Split 10%\n\n elif dataset=='celeba':\n x_train, y_train, group_train, x_test, y_test, group_test, x_valid, y_valid, group_valid = load_celeba_dataset()\n\n ## Shuffle Training Data\n x_train, y_train, group_train = shuffle(x_train, y_train, group_train, random_state=shuffle_seed)\n\n datascaler = MinMaxScaler()\n datascaler.fit(x_train)\n x_train, x_valid, x_test = datascaler.transform(x_train), datascaler.transform(x_valid), datascaler.transform(x_test)\n\n train_dataset = TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train), torch.from_numpy(group_train))\n valid_dataset = TensorDataset(torch.from_numpy(x_valid), torch.from_numpy(y_valid), torch.from_numpy(group_valid))\n test_dataset = TensorDataset(torch.from_numpy(x_test), torch.from_numpy(y_test), torch.from_numpy(group_test))\n\n if fairbatch:\n tensorx_train, tensory_train, tensorgroup_train = torch.from_numpy(x_train), torch.from_numpy(y_train), torch.from_numpy(group_train)\n sampler = FairBatch(model, tensorx_train.cuda().float(), tensory_train.cuda().long(), tensorgroup_train.cuda(), batch_size=128,\n alpha=0.005, target_fairness='eqodds', replacement=False, seed=0)\n trainloader = DataLoader(train_dataset, sampler=sampler)\n else:\n trainloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=train_shuffle, drop_last=False)\n\n validloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, drop_last=False)\n testloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=False)\n\n return trainloader, validloader, testloader\n", "repo_name": "privacytrustlab/Data-Order-Randomness-versus-Group-Fairness", "sub_path": "dataloader.py", "file_name": "dataloader.py", "file_ext": "py", "file_size_in_byte": 5000, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 40, "usage_type": "call"}, {"api_name": "folktables.ACSDataSource", "line_number": 58, "usage_type": "call"}, {"api_name": "folktables.ACSIncome", "line_number": 62, "usage_type": "name"}, {"api_name": "folktables.ACSEmployment", "line_number": 64, 
"usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 76, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 77, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 94, "usage_type": "call"}, {"api_name": "fairbatch_local.FairBatch", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 102, "usage_type": "call"}]} {"seq_id": "36462733258", "text": "import setuptools\nfrom PublicDataReader.config.info import __version__, __author__, __contact__, __github__\n\nwith open(\"requirements.txt\") as f:\n tests_require = f.readlines()\ninstall_requires = [t.strip() for t in tests_require]\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"PublicDataReader\",\n version=__version__,\n license=\"MIT\",\n author=__author__,\n author_email=__contact__,\n description=\"Open Source Public Data Reader\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=__github__,\n packages=setuptools.find_packages(),\n package_data={\"PublicDataReader\": [\"raw/*.json\"]},\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n install_requires=install_requires,\n)\n", "repo_name": "WooilJeong/PublicDataReader", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 961, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 425, "dataset": "github-code", "pt": "46", "api": [{"api_name": "setuptools.setup", "line_number": 11, "usage_type": "call"}, {"api_name": "PublicDataReader.config.info.__version__", "line_number": 13, "usage_type": "name"}, {"api_name": "PublicDataReader.config.info.__author__", "line_number": 15, "usage_type": "name"}, {"api_name": "PublicDataReader.config.info.__contact__", "line_number": 16, "usage_type": "name"}, {"api_name": "PublicDataReader.config.info.__github__", "line_number": 20, "usage_type": "name"}, {"api_name": "setuptools.find_packages", "line_number": 21, "usage_type": "call"}]} {"seq_id": "2096552073", "text": "import numpy as np\nimport pandas as pd\nimport json\nimport math\nimport random\nimport pickle\nfrom tqdm import tqdm\nimport time\nimport os\n\nclass DataLoader():\n def __init__(self, args, mode, batch, shuffle):\n\n self.args = args\n\n self.data_path = self.args.data_path\n self.anno_path = os.path.join(self.data_path, 
'annotation')\n\n self.mode = mode\n self.shuffle = shuffle\n self.batch = batch\n\n self.feature = os.path.join(self.data_path, 'I3D_features')\n self.data_segments = self.gen_dataset()\n self.size = len(self.data_segments)\n self.nbatch = int(self.size / self.batch)\n\n def gen_dataset(self):\n\n alldata = json.load(open(os.path.join(self.anno_path, 'thumos14.json')))['database']\n database = {}\n for video in alldata.keys():\n if alldata[video]['subset'] == self.mode:\n database[video] = alldata[video]\n\n data_segments = [] \n for key, video in database.items():\n t_granularity = self.args.t_granularity/self.args.fps[key]\n t_step = self.args.t_step/self.args.fps[key]\n fealength = int((video['fealength_step4']+self.args.down_sample-1) / self.args.down_sample)\n actions = np.zeros([fealength, self.args.class_num])\n points = np.zeros([2, fealength, self.args.class_num])\n biases = np.zeros([2, fealength, self.args.class_num])\n annotation = video['annotations']\n for anno in annotation:\n # time unit: sec\n s0 = float(anno['segment'][0])\n e0 = float(anno['segment'][1])\n l = e0 - s0\n s1 = max(s0-l/10., 0.0)\n s2 = (s0+l/10.)\n e1 = (e0-l/10.)\n e2 = min(float((fealength-1)*t_step+(t_granularity/2.)), e0+l/10.)\n\n is0 = max(0, round((s0-t_granularity/2.)/t_step))\n is1 = max(0, round((s1-t_granularity/2.)/t_step))\n is2 = max(0, round((s2-t_granularity/2.)/t_step))\n ie0 = min((fealength-1), round((e0-t_granularity/2.)/t_step))\n ie1 = min((fealength-1), round((e1-t_granularity/2.)/t_step))\n ie2 = min((fealength-1), round((e2-t_granularity/2.)/t_step))\n\n\n actions[is0:ie0+1,anno['labelidx']] = 1\n points[0,is1:is2+1,anno['labelidx']] = 1\n points[1,ie1:ie2+1,anno['labelidx']] = 1\n\n if len(biases[0,is1:is2+1,anno['labelidx']]) != len(range(is1,is2+1)) or len(biases[1,ie1:ie2+1,anno['labelidx']]) != len(range(ie1,ie2+1)):\n # print(key,anno['labelidx'],fealength, is1,is2+1,ie1,ie2+1)\n continue\n else:\n biases[0,is1:is2+1,anno['labelidx']] = [s0 - (t*t_step+t_granularity/2.) for t in range(is1,is2+1)] \n biases[1,ie1:ie2+1,anno['labelidx']] = [e0 - (t*t_step+t_granularity/2.) 
for t in range(ie1,ie2+1)]\n\n            data_segments.append((key, fealength, actions, points, biases))\n\n        if self.shuffle:\n            random.shuffle(data_segments)\n\n        return data_segments\n\n\n    def gen_train_batch(self, index):\n\n        batchdata = self.data_segments[index*self.batch:(index+1)*self.batch]\n        aa, pp, bb, ff, mm = [], [], [], [], []\n\n        for data in batchdata:\n            a = np.zeros([1, self.args.out_window, self.args.class_num])\n            p = np.zeros([1, 2, self.args.out_window, self.args.class_num])\n            b = np.zeros([1, 2, self.args.out_window, self.args.class_num])\n            f = np.zeros([1, self.args.in_window, 2048])\n            m = np.zeros([1, self.args.out_window, 1])\n\n            key, fealength, actions, points, biases = data\n\n            features = np.load(os.path.join(self.feature, key+'.npy'))\n            length = features.shape[0]\n\n            if fealength <= self.args.out_window:\n                a[0,:fealength,:] = actions\n                p[0,:,:fealength,:] = points\n                b[0,:,:fealength,:] = biases\n                f[0,:length,:] = features\n                m[0,:fealength,:] = 1\n            else:\n                actions_sum = np.sum(actions, 1)\n                flag = 0\n                count = 0\n                while flag == 0:\n                    count += 1\n                    s = np.random.randint(0, fealength-self.args.out_window+1)\n                    e = s + self.args.out_window\n                    if (s == 0 or actions_sum[s] == 0) and (e == fealength or actions_sum[e-1] == 0):\n                        a[0,:fealength,:] = actions[s:e,:]\n                        p[0,:,:fealength,:] = points[:,s:e,:]\n                        b[0,:,:fealength,:] = biases[:,s:e,:]\n                        tmp_length = features[s*self.args.down_sample:e*self.args.down_sample,:].shape[0]\n                        f[0,:tmp_length,:] = features[s*self.args.down_sample:e*self.args.down_sample,:]\n                        m[0,:,:] = 1\n                        flag = 1\n                    if count > 1000:\n                        break\n                if flag == 0:\n                    # print('no good sample')\n                    a[0,:fealength,:] = actions[0:self.args.out_window,:]\n                    p[0,:,:fealength,:] = points[:,0:self.args.out_window,:]\n                    b[0,:,:fealength,:] = biases[:,0:self.args.out_window,:]\n                    f[0,:,:] = features[0:self.args.in_window,:]\n                    m[0,:,:] = 1\n            aa.append(a)\n            pp.append(p)\n            bb.append(b)\n            ff.append(f)\n            mm.append(m)\n        aa = np.concatenate(aa)\n        pp = np.concatenate(pp)\n        bb = np.concatenate(bb)\n        ff = np.concatenate(ff)\n        mm = np.concatenate(mm)\n\n        return np.max(aa,2,keepdims=True), np.max(pp,3,keepdims=True), np.max(bb,3,keepdims=True)+np.min(bb,3,keepdims=True), ff, mm\n\n\n    def gen_eval_batch(self, index):\n\n\n        key, fealength, actions, points, biases = self.data_segments[index]\n\n        features = np.load(os.path.join(self.feature, key+'.npy'))\n\n        aa = np.expand_dims(actions, 0)\n        pp = np.expand_dims(points, 0)\n        bb = np.expand_dims(biases, 0)\n        ff = np.expand_dims(features, 0)\n        mm = np.ones((1,fealength,1))\n\n        return key, np.max(aa,2,keepdims=True), np.max(pp,3,keepdims=True), np.max(bb,3,keepdims=True)+np.min(bb,3,keepdims=True), ff, mm\n\n\n\n\nclass pem_DataLoader():\n    def __init__(self, batch, shuffle, datafile, evaluation=False):\n\n        self.data = pickle.load(open(datafile, 'rb'))\n        self.keys = list(self.data.keys())\n        self.batch = batch\n        self.num = len(self.keys)\n        if shuffle:\n            random.shuffle(self.keys)\n\n        if evaluation:\n            pass\n        else:\n            ratio = 0.9\n            self.train_key = self.keys[:int(self.num*ratio)]\n            self.val_key = self.keys[int(self.num*ratio):]\n\n            self.train_data = []\n            with tqdm(total=len(self.train_key)) as count:\n                for key in self.train_key:\n                    self.train_data += self.data[key]\n                    count.update(1)\n            self.train_num = len(self.train_data)\n            self.train_nbatch = int(self.train_num / batch)\n\n            self.val_data = []\n            with tqdm(total=len(self.val_key)) as count:\n                for key in self.val_key:\n                    self.val_data += self.data[key]\n                    count.update(1)\n            self.val_num = len(self.val_data)\n            self.val_nbatch = int(self.val_num / 
batch)\n\n def generate_batch(self, mode, step):\n\n if mode == 'train':\n pem_data = self.train_data\n else:\n pem_data = self.val_data\n feature = []\n iou = []\n for item in pem_data[step*self.batch:(step+1)*self.batch]:\n feature.append(item[0])\n iou.append(item[1])\n feature = np.vstack(feature)\n iou = np.vstack(iou)\n\n return feature, iou\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "PeisenZhao/Bottom-Up-TAL-with-MR", "sub_path": "dataloader.py", "file_name": "dataloader.py", "file_ext": "py", "file_size_in_byte": 8014, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 44, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 111, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 157, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 165, "usage_type": "call"}, 
{"api_name": "random.shuffle", "line_number": 170, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 180, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 207, "usage_type": "call"}]} {"seq_id": "29926869924", "text": "import datetime\nfrom flask import Flask, render_template, request, redirect, session\napp = Flask(__name__)\napp.secret_key = 'development_key'\n\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/checkout/submit', methods=['POST'])\ndef checkout_submit():\n print(request.form)\n session['checkout_form'] = request.form\n return redirect('/checkout')\n\n\n@app.route('/checkout')\ndef checkout():\n count = 0\n count += int(session['checkout_form']['strawberry'])\n count += int(session['checkout_form']['raspberry'])\n count += int(session['checkout_form']['apple'])\n session['count'] = count\n session['date'] = datetime.datetime.now()\n return render_template(\"checkout.html\", session=session)\n\n\n@app.route('/fruits')\ndef fruits():\n return render_template(\"fruits.html\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "repo_name": "vaught-dawson/Python_v21.1_Assignments", "sub_path": "flask/fundamentals/dojo_fruit_store/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 879, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}]} {"seq_id": "73227125578", "text": "\"\"\"added some more fields to the requests\n\nRevision ID: 463c6236fa58\nRevises: 02cc87f01969\nCreate Date: 2022-05-22 22:59:36.360116\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '463c6236fa58'\ndown_revision = '02cc87f01969'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.add_column('pdpa_requests', sa.Column('detail', sa.Text(), nullable=True))\n    op.add_column('pdpa_requests', sa.Column('received_at', sa.DateTime(timezone=True), nullable=True))\n    op.add_column('pdpa_requests', sa.Column('received_by', sa.Integer(), nullable=True))\n    op.create_foreign_key(None, 'pdpa_requests', 'staff_account', ['received_by'], ['id'])\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint(None, 'pdpa_requests', type_='foreignkey')\n    op.drop_column('pdpa_requests', 'received_by')\n    op.drop_column('pdpa_requests', 'received_at')\n    op.drop_column('pdpa_requests', 'detail')\n    # ### end Alembic commands ###\n", "repo_name": "MUMT-IT/mis2018", "sub_path": "migrations/versions/463c6236fa58_added_some_more_fields_to_the_requests.py", "file_name": "463c6236fa58_added_some_more_fields_to_the_requests.py", "file_ext": "py", "file_size_in_byte": 1141, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "46", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 24, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}]} {"seq_id": "15257226468", "text": "import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras import layers\nimport tensorflow_hub as hub\nfrom collections import deque\nimport random\nimport math\nfrom tensorflow.keras import backend as K\n\n\"\"\"\nwe add two lines:\n\ne = tf.keras.backend.max(y_true, axis=-1)\ny_pred *= K.stack([e]*8, axis=-1)\n\nto zero out the predicted probabilities at positions that contain neither a unit nor a city, so that the loss focuses only on the occupied positions.\n\"\"\"\n\ndef custom_mean_squared_error(y_true, y_pred):\n    y_units_true = y_true[:,:,:,:6]\n    y_cities_true = y_true[:,:,:,6:]\n\n    y_units_pred = y_pred[:,:,:,:6]\n    y_cities_pred = y_pred[:,:,:,6:]\n\n\n    is_unit = tf.keras.backend.max(y_units_true,axis = -1)\n    
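# occupancy masks of shape (batch, H, W): 1 where the ground truth marks a unit\n    # (here) or a city (below) in a cell, 0 elsewhere; stacked across the channel axis\n    # further down, they zero the predictions at empty cells so that only occupied\n    # positions contribute to the loss\n    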
is_city = tf.keras.backend.max(y_cities_true,axis = -1)\n\n y_units_pred*= K.stack([is_unit]*6, axis=-1)\n y_cities_pred*= K.stack([is_city]*2, axis=-1)\n\n loss1 = K.square(y_units_pred - y_units_true)#/K.sum(is_unit)\n loss2 = K.square(y_cities_pred - y_cities_true)#/K.sum(is_city)\n return K.concatenate([loss1,loss2])\n\ndef units_accuracy(y_true, y_pred):\n y_units_true = y_true[:,:,:,:6]\n y_cities_true = y_true[:,:,:,6:]\n\n y_units_pred = y_pred[:,:,:,:6]\n y_cities_pred = y_pred[:,:,:,6:]\n\n is_unit = tf.keras.backend.max(y_units_true,axis = -1)\n y_units_pred*= K.stack([is_unit]*6, axis=-1)\n return K.cast(K.equal(y_units_true, K.round(y_units_pred)), \"float32\")/K.sum(is_unit)\n\ndef cities_accuracy(y_true, y_pred):\n y_units_true = y_true[:,:,:,:6]\n y_cities_true = y_true[:,:,:,6:]\n\n y_units_pred = y_pred[:,:,:,:6]\n y_cities_pred = y_pred[:,:,:,6:]\n\n is_city = tf.keras.backend.max(y_cities_true,axis = -1)\n y_cities_pred*= K.stack([is_city]*2, axis=-1)\n\n return K.cast(K.equal(y_cities_true, K.round(y_cities_pred)), \"float32\")/K.sum(is_city)\n\n\ndef get_model(s):\n inputs = keras.Input(shape=(s,s,17),name = 'The game map')\n f = layers.Flatten()(inputs)\n h,w= s,s\n f = layers.Dense(w*h,activation = \"sigmoid\")(f)\n f = layers.Reshape((h,w,-1))(f)\n units = layers.Dense(6,activation = \"softmax\",name = \"Units_actions\")(f)\n cities = layers.Dense(2,activation = \"sigmoid\",name = \"Cities_actions\")(f)\n output = layers.Concatenate()([units,cities])\n model = keras.Model(inputs = inputs, outputs = output)\n model.compile(optimizer= \"adam\", loss= custom_mean_squared_error ,metrics = [\"accuracy\"])\n\n return model\n\n\nmodel =get_model(12)\nmodel.summary()\n\ntf.keras.utils.plot_model(\n model,\n to_file=\"model.png\",\n show_shapes=1,\n show_dtype=1,\n show_layer_names=True,\n rankdir=\"TB\",\n expand_nested=False,\n dpi=96)", "repo_name": "Ruben1701/Kaggle_Ai_Challenge", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 2671, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "tensorflow.keras.backend.max", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.max", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.stack", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 31, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.stack", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 32, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.square", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 34, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.square", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 35, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.concatenate", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 36, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.max", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 45, "usage_type": "attribute"}, {"api_name": 
"tensorflow.keras.backend.stack", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 46, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.cast", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 47, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.equal", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.round", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.sum", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.max", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.stack", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 57, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.cast", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 59, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.equal", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.round", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.sum", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras.Input", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 63, "usage_type": "name"}, {"api_name": "keras.layers.Flatten", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 64, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 66, "usage_type": "name"}, {"api_name": "keras.layers.Reshape", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 67, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 68, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 69, "usage_type": "name"}, {"api_name": "keras.layers.Concatenate", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 70, "usage_type": "name"}, {"api_name": "tensorflow.keras.Model", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 71, "usage_type": "name"}, {"api_name": "tensorflow.keras.utils.plot_model", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 80, "usage_type": "attribute"}]} {"seq_id": "5579289730", "text": "from qcc import Qcc\n\nimport consts\nimport utils\nimport json\n\ndef query_industry(industry:str):\n \"\"\"\n 查询企业行业主函数\n industry:行业\n \"\"\"\n qcc = Qcc()\n detail_url = qcc.query_url(industry)\n if not detail_url:\n print('未成功获取到公司详情页链接,请检查公司名错误或其他错误')\n return ''\n else:\n industry = qcc.query_industry(detail_url)\n if industry != '':\n return utils.get_industry(industry)\n else:\n return ''\n \nif __name__ == '__main__':\n query_industry('腾讯')\n", "repo_name": "Jeremylee1234/query_industry", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 544, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "qcc.Qcc", "line_number": 12, 
"usage_type": "call"}, {"api_name": "qcc.query_url", "line_number": 13, "usage_type": "call"}, {"api_name": "qcc.query_industry", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.get_industry", "line_number": 20, "usage_type": "call"}]} {"seq_id": "71339995964", "text": "import numpy as np\nimport os\nimport tempfile\nimport unittest\nimport cv2\nimport torch\n\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.structures import BoxMode, Instances, RotatedBoxes\nfrom detectron2.utils.visualizer import ColorMode, Visualizer\n\n\nclass TestVisualizer(unittest.TestCase):\n def _random_data(self):\n H, W = 100, 100\n N = 10\n img = np.random.rand(H, W, 3) * 255\n boxxy = np.random.rand(N, 2) * (H // 2)\n boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1)\n\n def _rand_poly():\n return np.random.rand(3, 2).flatten() * H\n\n polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)]\n\n mask = np.zeros_like(img[:, :, 0], dtype=bool)\n mask[:40, 10:20] = 1\n\n labels = [str(i) for i in range(N)]\n return img, boxes, labels, polygons, [mask] * N\n\n @property\n def metadata(self):\n return MetadataCatalog.get(\"coco_2017_train\")\n\n def test_draw_dataset_dict(self):\n img = np.random.rand(512, 512, 3) * 255\n dic = {\n \"annotations\": [\n {\n \"bbox\": [\n 368.9946492271106,\n 330.891438763377,\n 13.148537455410235,\n 13.644708680142685,\n ],\n \"bbox_mode\": BoxMode.XYWH_ABS,\n \"category_id\": 0,\n \"iscrowd\": 1,\n \"segmentation\": {\n \"counts\": \"_jh52m?2N2N2N2O100O10O001N1O2MceP2\",\n \"size\": [512, 512],\n },\n }\n ],\n \"height\": 512,\n \"image_id\": 1,\n \"width\": 512,\n }\n v = Visualizer(img)\n v.draw_dataset_dict(dic)\n\n v = Visualizer(img, self.metadata)\n v.draw_dataset_dict(dic)\n\n def test_draw_rotated_dataset_dict(self):\n img = np.random.rand(512, 512, 3) * 255\n dic = {\n \"annotations\": [\n {\n \"bbox\": [\n 368.9946492271106,\n 330.891438763377,\n 13.148537455410235,\n 13.644708680142685,\n 45.0,\n ],\n \"bbox_mode\": BoxMode.XYWHA_ABS,\n \"category_id\": 0,\n \"iscrowd\": 1,\n }\n ],\n \"height\": 512,\n \"image_id\": 1,\n \"width\": 512,\n }\n v = Visualizer(img, self.metadata)\n v.draw_dataset_dict(dic)\n\n def test_overlay_instances(self):\n img, boxes, labels, polygons, masks = self._random_data()\n\n v = Visualizer(img, self.metadata)\n output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()\n self.assertEqual(output.shape, img.shape)\n\n # Test 2x scaling\n v = Visualizer(img, self.metadata, scale=2.0)\n output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()\n self.assertEqual(output.shape[0], img.shape[0] * 2)\n\n # Test overlay masks\n v = Visualizer(img, self.metadata)\n output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image()\n self.assertEqual(output.shape, img.shape)\n\n def test_overlay_instances_no_boxes(self):\n img, boxes, labels, polygons, _ = self._random_data()\n v = Visualizer(img, self.metadata)\n v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image()\n\n def test_draw_instance_predictions(self):\n img, boxes, _, _, masks = self._random_data()\n num_inst = len(boxes)\n inst = Instances((img.shape[0], img.shape[1]))\n inst.pred_classes = torch.randint(0, 80, size=(num_inst,))\n inst.scores = torch.rand(num_inst)\n inst.pred_boxes = torch.from_numpy(boxes)\n inst.pred_masks = torch.from_numpy(np.asarray(masks))\n\n v = Visualizer(img)\n v.draw_instance_predictions(inst)\n\n v = 
Visualizer(img, self.metadata)\n v.draw_instance_predictions(inst)\n\n def test_BWmode_nomask(self):\n img, boxes, _, _, masks = self._random_data()\n num_inst = len(boxes)\n inst = Instances((img.shape[0], img.shape[1]))\n inst.pred_classes = torch.randint(0, 80, size=(num_inst,))\n inst.scores = torch.rand(num_inst)\n inst.pred_boxes = torch.from_numpy(boxes)\n\n v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW)\n v.draw_instance_predictions(inst)\n\n # check that output is grayscale\n inst = inst[:0]\n v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW)\n output = v.draw_instance_predictions(inst).get_image()\n self.assertTrue(np.allclose(output[:, :, 0], output[:, :, 1]))\n self.assertTrue(np.allclose(output[:, :, 0], output[:, :, 2]))\n\n def test_draw_empty_mask_predictions(self):\n img, boxes, _, _, masks = self._random_data()\n num_inst = len(boxes)\n inst = Instances((img.shape[0], img.shape[1]))\n inst.pred_classes = torch.randint(0, 80, size=(num_inst,))\n inst.scores = torch.rand(num_inst)\n inst.pred_boxes = torch.from_numpy(boxes)\n inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks)))\n\n v = Visualizer(img, self.metadata)\n v.draw_instance_predictions(inst)\n\n def test_correct_output_shape(self):\n img = np.random.rand(928, 928, 3) * 255\n v = Visualizer(img, self.metadata)\n out = v.output.get_image()\n self.assertEqual(out.shape, img.shape)\n\n def test_overlay_rotated_instances(self):\n H, W = 100, 150\n img = np.random.rand(H, W, 3) * 255\n num_boxes = 50\n boxes_5d = torch.zeros(num_boxes, 5)\n boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W)\n boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H)\n boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))\n boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))\n boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)\n rotated_boxes = RotatedBoxes(boxes_5d)\n labels = [str(i) for i in range(num_boxes)]\n\n v = Visualizer(img, self.metadata)\n output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image()\n self.assertEqual(output.shape, img.shape)\n\n def test_draw_no_metadata(self):\n img, boxes, _, _, masks = self._random_data()\n num_inst = len(boxes)\n inst = Instances((img.shape[0], img.shape[1]))\n inst.pred_classes = torch.randint(0, 80, size=(num_inst,))\n inst.scores = torch.rand(num_inst)\n inst.pred_boxes = torch.from_numpy(boxes)\n inst.pred_masks = torch.from_numpy(np.asarray(masks))\n\n v = Visualizer(img, MetadataCatalog.get(\"asdfasdf\"))\n v.draw_instance_predictions(inst)\n\n def test_draw_binary_mask(self):\n img, boxes, _, _, masks = self._random_data()\n img[:, :, 0] = 0 # remove red color\n mask = masks[0]\n mask_with_hole = np.zeros_like(mask).astype(\"uint8\")\n mask_with_hole = cv2.rectangle(mask_with_hole, (10, 10), (50, 50), 1, 5)\n\n for m in [mask, mask_with_hole]:\n for save in [True, False]:\n v = Visualizer(img)\n o = v.draw_binary_mask(m, color=\"red\", text=\"test\")\n if save:\n with tempfile.TemporaryDirectory(prefix=\"detectron2_viz\") as d:\n path = os.path.join(d, \"output.png\")\n o.save(path)\n o = cv2.imread(path)[:, :, ::-1]\n else:\n o = o.get_image().astype(\"float32\")\n # red color is drawn on the image\n self.assertTrue(o[:, :, 0].sum() > 0)\n\n def test_draw_soft_mask(self):\n img = np.random.rand(100, 100, 3) * 255\n img[:, :, 0] = 0 # remove red color\n mask = np.zeros((100, 100), dtype=np.float32)\n mask[30:50, 40:50] = 
1.0\n cv2.GaussianBlur(mask, (21, 21), 10)\n\n v = Visualizer(img)\n o = v.draw_soft_mask(mask, color=\"red\", text=\"test\")\n o = o.get_image().astype(\"float32\")\n # red color is drawn on the image\n self.assertTrue(o[:, :, 0].sum() > 0)\n\n # test draw empty mask\n v = Visualizer(img)\n o = v.draw_soft_mask(np.zeros((100, 100), dtype=np.float32), color=\"red\", text=\"test\")\n o = o.get_image().astype(\"float32\")\n\n def test_border_mask_with_holes(self):\n H, W = 200, 200\n img = np.zeros((H, W, 3))\n img[:, :, 0] = 255.0\n v = Visualizer(img, scale=3)\n\n mask = np.zeros((H, W))\n mask[:, 100:150] = 1\n # create a hole, to trigger imshow\n mask = cv2.rectangle(mask, (110, 110), (130, 130), 0, thickness=-1)\n output = v.draw_binary_mask(mask, color=\"blue\")\n output = output.get_image()[:, :, ::-1]\n\n first_row = {tuple(x.tolist()) for x in output[0]}\n last_row = {tuple(x.tolist()) for x in output[-1]}\n # Check quantization / off-by-1 error: the first and last row must have two colors\n self.assertEqual(len(last_row), 2)\n self.assertEqual(len(first_row), 2)\n self.assertIn((0, 0, 255), last_row)\n self.assertIn((0, 0, 255), first_row)\n\n def test_border_polygons(self):\n H, W = 200, 200\n img = np.zeros((H, W, 3))\n img[:, :, 0] = 255.0\n v = Visualizer(img, scale=3)\n mask = np.zeros((H, W))\n mask[:, 100:150] = 1\n\n output = v.draw_binary_mask(mask, color=\"blue\")\n output = output.get_image()[:, :, ::-1]\n\n first_row = {tuple(x.tolist()) for x in output[0]}\n last_row = {tuple(x.tolist()) for x in output[-1]}\n # Check quantization / off-by-1 error:\n # the first and last row must have >=2 colors, because the polygon\n # touches both rows\n self.assertGreaterEqual(len(last_row), 2)\n self.assertGreaterEqual(len(first_row), 2)\n self.assertIn((0, 0, 255), last_row)\n self.assertIn((0, 0, 255), first_row)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "repo_name": "facebookresearch/detectron2", "sub_path": "tests/test_visualizer.py", "file_name": "test_visualizer.py", "file_ext": "py", "file_size_in_byte": 10378, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 27217, "dataset": "github-code", "pt": "41", "api": [{"api_name": "unittest.TestCase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 26, "usage_type": "call"}, {"api_name": "detectron2.data.MetadataCatalog.get", "line_number": 34, "usage_type": "call"}, {"api_name": "detectron2.data.MetadataCatalog", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 37, "usage_type": "attribute"}, {"api_name": "detectron2.structures.BoxMode.XYWH_ABS", "line_number": 47, "usage_type": "attribute"}, {"api_name": "detectron2.structures.BoxMode", "line_number": 47, 
"usage_type": "name"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 60, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "detectron2.structures.BoxMode.XYWHA_ABS", "line_number": 78, "usage_type": "attribute"}, {"api_name": "detectron2.structures.BoxMode", "line_number": 78, "usage_type": "name"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 87, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 93, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 98, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 103, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 109, "usage_type": "call"}, {"api_name": "detectron2.structures.Instances", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 119, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 121, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 124, "usage_type": "call"}, {"api_name": "detectron2.structures.Instances", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 133, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 135, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.ColorMode.IMAGE_BW", "line_number": 135, "usage_type": "attribute"}, {"api_name": "detectron2.utils.visualizer.ColorMode", "line_number": 135, "usage_type": "name"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 140, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.ColorMode.IMAGE_BW", "line_number": 140, "usage_type": "attribute"}, {"api_name": "detectron2.utils.visualizer.ColorMode", "line_number": 140, "usage_type": "name"}, {"api_name": "numpy.allclose", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 143, "usage_type": "call"}, {"api_name": "detectron2.structures.Instances", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 152, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 158, "usage_type": 
"attribute"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 165, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 172, "usage_type": "call"}, {"api_name": "detectron2.structures.RotatedBoxes", "line_number": 173, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 176, "usage_type": "call"}, {"api_name": "detectron2.structures.Instances", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 187, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 189, "usage_type": "call"}, {"api_name": "detectron2.data.MetadataCatalog.get", "line_number": 189, "usage_type": "call"}, {"api_name": "detectron2.data.MetadataCatalog", "line_number": 189, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 196, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 197, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 201, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 214, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 216, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 218, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 220, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 228, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 233, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 237, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 254, "usage_type": "call"}, {"api_name": "detectron2.utils.visualizer.Visualizer", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 257, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 275, "usage_type": 
"call"}]} {"seq_id": "14364668211", "text": "from actions.service_recommender_action import RecommendServices\nfrom rasa_sdk.executor import CollectingDispatcher\nimport unittest\nfrom unittest.mock import MagicMock, patch\nimport json\nimport sys\nimport os\nsys.path.append('actions')\n\nSERVICE_RECOMMENDER_JSON_RESPONSE_SUCCESS = [\n {\n \"service\": {\n \"id\": \"8c6e25e9-e186-49fd-852c-f6f168d1351f\",\n \"type\": \"Service\",\n \"subtype\": None,\n \"organizations\": [\n {\n \"id\": \"c5f6914f-302e-41cc-bed7-4d4215aac640\",\n \"name\": \"Kela\"\n },\n {\n \"id\": \"c5f6914f-302e-41cc-bed7-4d4215aac640\",\n \"name\": \"Kela\"\n }\n ],\n \"name\": {\n \"en\": \"Kela's benefits for the unemployed\",\n \"fi\": \"Kelan tuet työttömille\",\n \"sv\": \"FPA:s stöd för arbetslösa\"\n },\n \"descriptions\": {\n \"en\": [\n {\n \"value\": \"* labour market subsidy\\n* basic unemployment allowance\\n* commuting and relocation allowance\\n* job alternation compensation\",\n \"type\": \"Description\"\n },\n {\n \"value\": \"Unemployed\",\n \"type\": \"Summary\"\n }\n ],\n \"fi\": [\n {\n \"value\": \"* työmarkkinatuki\\n* peruspäiväraha\\n* liikkuvuusavustus\\n* vuorottelukorvaus\",\n \"type\": \"Description\"\n },\n {\n \"value\": \"Työttömät\",\n \"type\": \"Summary\"\n }\n ],\n \"sv\": [\n {\n \"value\": \"Arbetslösa\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"* arbetsmarknadsstöd\\n* grunddagpenning \\n* rörlighetsunderstöd\\n* alterneringsersättning\",\n \"type\": \"Description\"\n }\n ]\n },\n \"requirement\": {\n \"en\": \"http://www.kela.fi/unemployment\",\n \"fi\": \"http://www.kela.fi/tyottomat\",\n \"sv\": \"www.fpa.fi/utanarbete\"\n },\n \"targetGroups\": {\n \"en\": [\n {\n \"name\": \"Finnish startups\",\n \"code\": \"KR2.8\"\n },\n {\n \"name\": \"Businesses and non-government organizations\",\n \"code\": \"KR2\"\n },\n {\n \"name\": \"Businesses operating in the domestic (Finnish) market\",\n \"code\": \"KR2.3\"\n },\n {\n \"name\": \"Citizens\",\n \"code\": \"KR1\"\n }\n ],\n \"fi\": [\n {\n \"name\": \"Yrityksen perustajat kotimaassa\",\n \"code\": \"KR2.8\"\n },\n {\n \"name\": \"Yritykset ja yhteisöt\",\n \"code\": \"KR2\"\n },\n {\n \"name\": \"Kotimarkkinoilla toimivat yritykset\",\n \"code\": \"KR2.3\"\n },\n {\n \"name\": \"Kansalaiset\",\n \"code\": \"KR1\"\n }\n ],\n \"sv\": [\n {\n \"name\": \"Inhemska företagsgrundare\",\n \"code\": \"KR2.8\"\n },\n {\n \"name\": \"Företag och samfund\",\n \"code\": \"KR2\"\n },\n {\n \"name\": \"Företag pÃ¥ den inhemska marknaden\",\n \"code\": \"KR2.3\"\n },\n {\n \"name\": \"Medborgare\",\n \"code\": \"KR1\"\n }\n ]\n },\n \"serviceClasses\": {\n \"en\": [\n {\n \"name\": \"Support and benefits for the unemployed\",\n \"description\": \"This service subclass contains different types of financial support for unemployed jobseekers, support eligibility criteria and services related to applying for support.\",\n \"code\": \"P10.6\"\n },\n {\n \"name\": \"Working life rules and collective agreements\",\n \"description\": \"This service subclass contains issues related to employment contracts and terms of employment, pay, and equality and flexibility in working life, including telework and part-time work, from the service point of view.\",\n \"code\": \"P10.3\"\n }\n ],\n \"fi\": [\n {\n \"name\": \"Työttömän tuet ja etuudet\",\n \"description\": \"Tässä palvelualaluokassa käsitellään työttömälle työnhakijalle suunnattuja erilaisia taloudellisia tukia, niiden saamisen edellytyksiä ja tukien hakupalveluja.\",\n \"code\": \"P10.6\"\n 
},\n {\n \"name\": \"Työelämän säännöt ja työehtosopimukset\",\n \"description\": \"Tähän palvelualaluokkaan kuuluvat palvelujen näkökulmasta työsopimuksiin ja -ehtoihin, palkkaukseen, työelämän yhdenvertaisuuteen ja joustoihin kuten etä- ja osa-aikatyöhön liittyvät asiat.\",\n \"code\": \"P10.3\"\n }\n ],\n \"sv\": [\n {\n \"name\": \"Stöd och förmÃ¥ner för arbetslösa\",\n \"description\": \"I denna serviceundergrupp behandlas olika ekonomiska stödformer för arbetslösa arbetssökande, förutsättningar för beviljande av dem och tjänster för ansökan om stöd.\",\n \"code\": \"P10.6\"\n },\n {\n \"name\": \"Arbetslivets regler och kollektivavtal\",\n \"description\": \"Denna serviceundergrupp omfattar ärenden relaterade till arbetsavtal och -villkor, löner, jämställdhet och flexibilitet i arbetslivet, sÃ¥som distans- och deltidsarbete.\",\n \"code\": \"P10.3\"\n }\n ]\n },\n \"areas\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"lastUpdated\": \"2021-06-15T07:37:25.395000\"\n },\n \"channels\": [\n {\n \"id\": \"16d63b97-0b8f-4f72-95e7-7cc2f9ab9e15\",\n \"type\": \"EChannel\",\n \"areaType\": \"Nationwide\",\n \"organizationId\": \"c5f6914f-302e-41cc-bed7-4d4215aac640\",\n \"serviceIds\": [\n \"105837fa-97d2-4f9a-916e-09fe7ca19e52\",\n \"b5b82555-9852-4a77-89bd-7dcd332d4f11\",\n \"7d655de8-76fd-4f24-bb92-e8f49e153e88\",\n \"b84af2c2-824f-4b27-a599-fa28de4e437c\",\n \"f8dd3060-543c-47ba-abba-a14fc1feacb3\",\n \"ff059acf-de3f-468d-8b6a-9a492d301cda\",\n \"6529e1c2-b9ac-4f00-8e8b-6d13616ccf81\",\n \"8c6e25e9-e186-49fd-852c-f6f168d1351f\",\n \"3ef3f1ef-6754-4308-8ef4-deef416b081e\",\n \"b5c945e5-a4d6-47b9-9362-0fe0f20adc2e\",\n \"30fe7757-32ad-4ea7-a8e4-857b44f81160\",\n \"dae7ba63-46af-4130-995d-1e88cafaa70c\",\n \"e0556386-0ffa-40f2-98aa-770b42dd792a\",\n \"579991be-f40f-4913-8130-0e07592b50c4\",\n \"e52f663f-df44-426c-b401-147d4ebd19cc\",\n \"aeb60b1d-2872-4841-9704-652246948990\",\n \"76472df7-25ed-4c55-94dd-fa3dd98ee862\",\n \"58a4bf82-dc19-4ca5-a57d-a0ef39d0e89d\",\n \"506d84d5-0ecf-400a-8b74-f9bd990dab7b\",\n \"3456be0a-a126-43af-a364-f24e24786cb1\",\n \"5ecdee89-0459-4b27-8271-206f314b801b\",\n \"b0372b6c-5ab5-4dd1-92e5-bde71dd25488\",\n \"52693b89-c7da-4c61-80df-b6f871672064\",\n \"ad234c6c-e24c-4d0f-8698-81980502278d\",\n \"caba7a03-40b7-439e-871d-1d9081bd3299\",\n \"a0a34972-1af4-41d2-ac89-198ce1875e4f\",\n \"7fd28107-d7d6-4158-96d1-fad0bd8c7499\",\n \"19a31135-a9d6-4926-a20b-bfe7db1780d3\",\n \"05f6a1fd-925f-46f0-b0d2-8a92881710a6\",\n \"9b6eb134-2764-47bd-9b98-e04f3a50b88b\",\n \"b7e6eddc-0f49-4bfa-876c-53fa01ba7907\",\n \"e09e783a-6363-412e-bed2-4082768c914d\",\n \"0157fe90-43d2-40e4-895a-a9446829d1d8\"\n ],\n \"name\": {\n \"en\": \"Kela's online customer service\",\n \"fi\": \"Kelan Asiointipalvelu\",\n \"sv\": \"FPA:s e-tjänst\"\n },\n \"descriptions\": {\n \"en\": [\n {\n \"value\": \"Check your own data, apply for benefits, send supporting documents and report changes.\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"In Kela's online customer service you can check your own data, apply for benefits, send supporting documets and report changes. You can handle almost all your transactions with Kela on the Internet. \",\n \"type\": \"Description\"\n }\n ],\n \"fi\": [\n {\n \"value\": \"Tarkastele omia Kela-tietojasi, hae etuuksia, lähetä liitteitä ja ilmoita muutoksista. 
Voit hoitaa lähes kaikki Kela-asiasi verkossa.\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"Kelan verkkoasiointipalvelussa voit tarkastella omia Kela-tietojasi, hakea etuuksia, lähettää liitteitä ja ilmoittaa muutoksista. Voit hoitaa lähes kaikki Kela-asiasi verkossa.\",\n \"type\": \"Description\"\n }\n ],\n \"sv\": [\n {\n \"value\": \"Kontrollera dina uppgifter hos FPA, ansök om förmåner, skicka bilagor och meddela förändringar. Du kan sköta så gott som alla FPA-ärenden på nätet.\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"I FPA:s e-tjänst kan du kontrollera dina egna uppgifter hos FPA, ansöka om förmåner, skicka bilagor och meddela förändringar. Du kan sköta så gott som alla FPA-ärenden på nätet. \",\n \"type\": \"Description\"\n }\n ]\n },\n \"webPages\": {\n \"en\": [\n \"https://asiointi.kela.fi/go_app?lg=en\"\n ],\n \"fi\": [\n \"https://asiointi.kela.fi/go_app\"\n ],\n \"sv\": [\n \"https://asiointi.kela.fi/go_app?lg=sv\"\n ]\n },\n \"emails\": {\n \"en\": [],\n \"fi\": [\n \"tekninentuki@kela.fi\"\n ],\n \"sv\": []\n },\n \"phoneNumbers\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"areas\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"addresses\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"lastUpdated\": \"2021-06-16T07:19:48.498000\"\n },\n {\n \"id\": \"fbeff57b-fdb7-4acc-9344-9d97193bf910\",\n \"type\": \"ServiceLocation\",\n \"areaType\": \"Nationwide\",\n \"organizationId\": \"c5f6914f-302e-41cc-bed7-4d4215aac640\",\n \"serviceIds\": [\n \"e52f663f-df44-426c-b401-147d4ebd19cc\",\n \"579991be-f40f-4913-8130-0e07592b50c4\",\n \"30fe7757-32ad-4ea7-a8e4-857b44f81160\",\n \"3ef3f1ef-6754-4308-8ef4-deef416b081e\",\n \"aeb60b1d-2872-4841-9704-652246948990\",\n \"76472df7-25ed-4c55-94dd-fa3dd98ee862\",\n \"e0556386-0ffa-40f2-98aa-770b42dd792a\",\n \"6529e1c2-b9ac-4f00-8e8b-6d13616ccf81\",\n \"8c6e25e9-e186-49fd-852c-f6f168d1351f\",\n \"dae7ba63-46af-4130-995d-1e88cafaa70c\",\n \"b5c945e5-a4d6-47b9-9362-0fe0f20adc2e\",\n \"ff059acf-de3f-468d-8b6a-9a492d301cda\"\n ],\n \"name\": {\n \"en\": None,\n \"fi\": \"Haukiputaan palvelupiste\",\n \"sv\": \"Servicestället i Haukipudas\"\n },\n \"descriptions\": {\n \"en\": [],\n \"fi\": [\n {\n \"value\": \"Kelan palvelupiste\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"Kelan palvelupisteessä opastetaan ja neuvotaan kaikissa Kelan etuuksiin liittyvissä asioissa. Voit hakea etuuksia ja toimittaa liitteet myös asiointipalvelussamme osoitteessa www.kela.fi/asiointi. Lue myös mahdollisuudesta ajanvaraukseen: www.kela.fi/ajanvaraus.\",\n \"type\": \"Description\"\n }\n ],\n \"sv\": [\n {\n \"value\": \"Fpa:s serviceställe\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"På FPA:s serviceställe kan du få information och rådgivning om alla FPA-förmåner. Du kan också ansöka om förmåner och lämna in bilagor i vår e-tjänst på adressen www.fpa.fi/etjanst. 
Läs mer om hur du bokar tid på adressen www.fpa.fi/tidsbokning.\",\n \"type\": \"Description\"\n }\n ]\n },\n \"webPages\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"emails\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"phoneNumbers\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"areas\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"addresses\": {\n \"en\": [\n {\n \"type\": \"Location\",\n \"subtype\": \"Single\",\n \"streetNumber\": \"15\",\n \"postalCode\": \"90830\",\n \"latitude\": \"7229135.399\",\n \"longitude\": \"422665.198\",\n \"streetName\": \"Simppulantie\",\n \"postOffice\": \"HAUKIPUDAS\",\n \"municipalityCode\": \"564\",\n \"municipalityName\": \"Aura\"\n },\n {\n \"type\": \"Postal\",\n \"subtype\": \"PostOfficeBox\",\n \"streetNumber\": None,\n \"postalCode\": None,\n \"latitude\": None,\n \"longitude\": None,\n \"streetName\": None,\n \"postOffice\": None,\n \"municipalityCode\": None,\n \"municipalityName\": None\n }\n ],\n \"fi\": [\n {\n \"type\": \"Location\",\n \"subtype\": \"Single\",\n \"streetNumber\": \"15\",\n \"postalCode\": \"90830\",\n \"latitude\": \"7229135.399\",\n \"longitude\": \"422665.198\",\n \"streetName\": \"Simppulantie\",\n \"postOffice\": \"HAUKIPUDAS\",\n \"municipalityCode\": \"564\",\n \"municipalityName\": \"Aura\"\n },\n {\n \"type\": \"Postal\",\n \"subtype\": \"PostOfficeBox\",\n \"streetNumber\": None,\n \"postalCode\": None,\n \"latitude\": None,\n \"longitude\": None,\n \"streetName\": None,\n \"postOffice\": None,\n \"municipalityCode\": None,\n \"municipalityName\": None\n }\n ],\n \"sv\": [\n {\n \"type\": \"Location\",\n \"subtype\": \"Single\",\n \"streetNumber\": \"15\",\n \"postalCode\": \"90830\",\n \"latitude\": \"7229135.399\",\n \"longitude\": \"422665.198\",\n \"streetName\": \"Simppulantie\",\n \"postOffice\": \"HAUKIPUDAS\",\n \"municipalityCode\": \"564\",\n \"municipalityName\": \"Aura\"\n },\n {\n \"type\": \"Postal\",\n \"subtype\": \"PostOfficeBox\",\n \"streetNumber\": None,\n \"postalCode\": None,\n \"latitude\": None,\n \"longitude\": None,\n \"streetName\": None,\n \"postOffice\": None,\n \"municipalityCode\": None,\n \"municipalityName\": None\n }\n ]\n },\n \"lastUpdated\": \"2021-06-28T01:00:00.719000\"\n }\n ],\n \"score\": 0.8303728304964242\n },\n]\nSERVICE_RECOMMENDER_JSON_RESPONSE_ERROR = {\n \"detail\": [\n {\n \"loc\": [\n \"body\",\n 48\n ],\n \"msg\": \"Expecting value: line 4 column 1 (char 48)\",\n \"type\": \"value_error.jsondecode\",\n \"ctx\": {\n \"msg\": \"Expecting value\",\n \"doc\": \"{\\n \\\"need_text\\\": \\\"string\\\",\\n \\\"municipality_id\\\":\\n}\",\n \"pos\": 48,\n \"lineno\": 4,\n \"colno\": 1\n }\n }\n ]\n}\n\n# Test class for Rasa Tracker store which contains chatbot user message data\n\n\nclass TestRasaTracker():\n def __init__(self):\n self.slots = {\n 'general_service_search_text': 'olispa kahvia',\n 'municipality': 'turku',\n \"fallback_language\": \"fi\",\n \"session_started_metadata\": {\n \"language\": \"it\"\n }\n }\n self.latest_message = {\n \"intent\": {\n \"id\": -4114183629044666000,\n \"name\": \"public_transport\",\n \"confidence\": 0.9920039772987366\n },\n \"entities\": [],\n \"text\": \"joukkoliikenne\",\n \"message_id\": \"a4d3a71843eb449689e0eb4dc34ca7e9\",\n \"metadata\": {\n \"language\": \"fi\"\n },\n \"intent_ranking\": [\n ]\n }\n self.events = [\n {\n \"event\": \"action\",\n \"timestamp\": 1628670810.1175656,\n \"name\": \"action_session_start\",\n \"confidence\": 1\n },\n {\n \"event\": \"session_started\",\n 
\"timestamp\": 1628670810.117589\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670810.117605,\n \"name\": \"action_listen\"\n },\n {\n \"event\": \"user\",\n \"timestamp\": 1628670810.1180103,\n \"text\": \"/get_started\",\n \"parse_data\": {\n \"intent\": None,\n \"entities\": [],\n \"message_id\": \"d2f0600da3bc4648998c9727469121ce\",\n \"metadata\": {},\n \"intent_ranking\": [\n {\n \"name\": \"get_started\",\n \"confidence\": 1\n }\n ]\n },\n \"input_channel\": \"webchat\",\n \"message_id\": \"d2f0600da3bc4648998c9727469121ce\",\n \"metadata\": {}\n },\n {\n \"event\": \"user_featurization\",\n \"timestamp\": 1628670810.1396117,\n \"use_text_for_featurization\": False\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670810.1396365,\n \"name\": \"utter_get_started\",\n \"policy\": \"policy_2_AugmentedMemoizationPolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"bot\",\n \"timestamp\": 1628670810.1396775,\n \"metadata\": {\n \"template_name\": \"utter_get_started\"\n },\n \"text\": \"Moi! Autan sinua löytämään palveluita eri elämäntilanteisiisi liittyen Varsinais-Suomen alueelta.\\n\\nYmmärrän helpoiten melko lyhyitä viestejä tai voit myös klikkailla nappeja.\",\n \"data\": {}\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670810.1663811,\n \"name\": \"utter_get_started_choose_life_event\",\n \"policy\": \"policy_2_AugmentedMemoizationPolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"bot\",\n \"timestamp\": 1628670810.166434,\n \"metadata\": {\n \"template_name\": \"utter_get_started_choose_life_event\"\n },\n \"text\": \"Kuvaile ensiksi, millaiseen elämäntilanteeseen tarvitsisit apua tai voit myös etsiä vapaasti palveluita 😊\",\n \"data\": {\n \"buttons\": [\n {\n \"title\": \"Työttömäksi jääminen\",\n \"type\": \"postback\",\n \"payload\": \"/ke8_losing_job\"\n },\n {\n \"title\": \"Velkaantuminen\",\n \"type\": \"postback\",\n \"payload\": \"/ke9_debt\"\n },\n {\n \"title\": \"Omaisen kuolema\",\n \"type\": \"postback\",\n \"payload\": \"/ke14_death\"\n },\n {\n \"title\": \"Etsi vapaasti palveluita\",\n \"type\": \"postback\",\n \"payload\": \"/service_search\"\n }\n ]\n }\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670810.1708252,\n \"name\": \"action_listen\",\n \"policy\": \"policy_2_AugmentedMemoizationPolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"user\",\n \"timestamp\": 1628670821.483857,\n \"text\": \"/service_search\",\n \"parse_data\": {\n \"intent\": None,\n \"entities\": [],\n \"message_id\": \"7c25cbf27151428589d7fc51620e508a\",\n \"metadata\": {},\n \"intent_ranking\": [\n {\n \"name\": \"service_search\",\n \"confidence\": 1\n }\n ],\n \"text\": \"\"\n },\n \"input_channel\": \"webchat\",\n \"message_id\": \"7c25cbf27151428589d7fc51620e508a\",\n \"metadata\": {}\n },\n {\n \"event\": \"user_featurization\",\n \"timestamp\": 1628670821.5257907,\n \"use_text_for_featurization\": False\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670821.5258121,\n \"name\": \"general_service_search_form\",\n \"policy\": \"policy_2_AugmentedMemoizationPolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"active_loop\",\n \"timestamp\": 1628670821.5258555,\n \"name\": \"general_service_search_form\"\n },\n {\n \"event\": \"bot\",\n \"timestamp\": 1628670821.5258627,\n \"metadata\": {\n \"linkTarget\": \"_blank\",\n \"userInput\": \"show\",\n \"forceOpen\": False,\n \"forceClose\": False,\n \"pageChangeCallbacks\": None,\n \"pageEventCallbacks\": None,\n \"template_name\": \"utter_ask_general_service_search_text\"\n },\n \"text\": 
\"Kuvaile ensiksi palvelutarvettasi\",\n \"data\": {}\n },\n {\n \"event\": \"slot\",\n \"timestamp\": 1628670821.5258706,\n \"name\": \"requested_slot\",\n \"value\": \"general_service_search_text\"\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670821.5335426,\n \"name\": \"action_listen\",\n \"policy\": \"policy_1_RulePolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"user\",\n \"timestamp\": 1628670831.59987,\n \"text\": \"haluaisin mennä uimarannalle\",\n \"parse_data\": {\n \"intent\": None,\n \"entities\": [],\n \"text\": \"haluaisin mennä uimarannalle\",\n \"message_id\": \"1a5dad116ee34938b11f68e60c1814ba\",\n \"metadata\": {},\n \"intent_ranking\": [\n {\n \"id\": 1679135316125928700,\n \"name\": \"chitchat.bye\",\n \"confidence\": 0.8972764015197754,\n \"canonical\": \"hei hei\"\n }\n ]\n },\n \"input_channel\": \"webchat\",\n \"message_id\": \"1a5dad116ee34938b11f68e60c1814ba\",\n \"metadata\": {}\n },\n {\n \"event\": \"user_featurization\",\n \"timestamp\": 1628670831.625026,\n \"use_text_for_featurization\": False\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670831.625047,\n \"name\": \"general_service_search_form\",\n \"policy\": \"policy_1_RulePolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"slot\",\n \"timestamp\": 1628670831.6250882,\n \"name\": \"general_service_search_text\",\n \"value\": \"haluaisin mennä uimarannalle\"\n },\n {\n \"event\": \"bot\",\n \"timestamp\": 1628670831.6250937,\n \"metadata\": {\n \"template_name\": \"utter_ask_municipality\"\n },\n \"text\": \"Kerrotko vielä, mistä kunnasta haluat palveluita?\",\n \"data\": {}\n },\n {\n \"event\": \"slot\",\n \"timestamp\": 1628670831.6250968,\n \"name\": \"requested_slot\",\n \"value\": \"municipality\"\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670831.6321259,\n \"name\": \"action_listen\",\n \"policy\": \"policy_1_RulePolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"user\",\n \"timestamp\": 1628670839.1954765,\n \"text\": \"turku\",\n \"parse_data\": {\n \"intent\": None,\n \"entities\": [\n {\n \"entity\": \"municipality_entity\",\n \"start\": 0,\n \"end\": 5,\n \"confidence_entity\": 0.9993183612823486,\n \"value\": \"turku\",\n \"extractor\": \"DIETClassifier\"\n }\n ],\n \"text\": \"turku\",\n \"message_id\": \"e8c8472b6ec04644a241510e418b5332\",\n \"metadata\": {},\n \"intent_ranking\": [\n {\n \"id\": 8390830771550880000,\n \"name\": \"service_municipality_choice\",\n \"confidence\": 0.999856173992157,\n \"canonical\": \"haluan palveluita turusta\"\n }\n ]\n },\n \"input_channel\": \"webchat\",\n \"message_id\": \"e8c8472b6ec04644a241510e418b5332\",\n \"metadata\": {}\n },\n {\n \"event\": \"user_featurization\",\n \"timestamp\": 1628670839.203917,\n \"use_text_for_featurization\": False\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670839.2039268,\n \"name\": \"general_service_search_form\",\n \"policy\": \"policy_1_RulePolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"slot\",\n \"timestamp\": 1628670839.2039347,\n \"name\": \"municipality\",\n \"value\": \"turku\"\n },\n {\n \"event\": \"active_loop\",\n \"timestamp\": 1628670839.203938\n },\n {\n \"event\": \"slot\",\n \"timestamp\": 1628670839.203941,\n \"name\": \"requested_slot\"\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670839.9863353,\n \"name\": \"action_recommend_services\",\n \"policy\": \"policy_2_AugmentedMemoizationPolicy\",\n \"confidence\": 1\n }\n ]\n\n def get_slot(self, key):\n return self.slots[key]\n\n\n# This method will be used by the mock to replace 
requests.post to service recommender API\ndef mocked_requests_post(*args, **kwargs):\n\n class MockResponse:\n def __init__(self, json_data, status_code):\n self.json_data = json_data\n self.status_code = status_code\n\n def json(self):\n return self.json_data\n\n def text(self):\n return json.dumps(self.json_data)\n\n if args[0] == 'request_success/services/recommend':\n return MockResponse(SERVICE_RECOMMENDER_JSON_RESPONSE_SUCCESS, 200)\n elif args[0] == 'request_error/services/recommend':\n return MockResponse(SERVICE_RECOMMENDER_JSON_RESPONSE_ERROR, 400)\n\n\nclass TestRasaActionsRecommendServices(unittest.TestCase):\n\n def setUp(self):\n self.tracker = TestRasaTracker()\n\n @patch('requests.post', side_effect=mocked_requests_post)\n def test_recommend_services_action_success(self, mock_post):\n os.environ['RASA_ACTIONS_SERVICE_RECOMMENDER_ENDPOINT'] = 'request_success'\n dispatcher = CollectingDispatcher()\n action = RecommendServices()\n action.run(dispatcher, self.tracker, None)\n\n self.assertEqual(\n dispatcher.messages[0]['attachment']['payload']['elements'][0][\n 'title'], SERVICE_RECOMMENDER_JSON_RESPONSE_SUCCESS[0]['service']['name']['fi']\n )\n\n @patch('requests.post', side_effect=mocked_requests_post)\n def test_recommend_services_action_error(self, mock_post):\n os.environ['RASA_ACTIONS_SERVICE_RECOMMENDER_ENDPOINT'] = 'request_error'\n dispatcher = CollectingDispatcher()\n action = RecommendServices()\n action.run(dispatcher, self.tracker, None)\n self.assertEqual(\n dispatcher.messages[0]['response'], 'utter_recommendation_error')\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "City-of-Turku/PaohRasaPlatform", "sub_path": "test/test_rasa_actions.py", "file_name": "test_rasa_actions.py", "file_ext": "py", "file_size_in_byte": 33857, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 799, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 807, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 814, "usage_type": "attribute"}, {"api_name": "rasa_sdk.executor.CollectingDispatcher", "line_number": 815, "usage_type": "call"}, {"api_name": "actions.service_recommender_action.RecommendServices", "line_number": 816, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 812, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 826, "usage_type": "attribute"}, {"api_name": "rasa_sdk.executor.CollectingDispatcher", "line_number": 827, "usage_type": "call"}, {"api_name": "actions.service_recommender_action.RecommendServices", "line_number": 828, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 824, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 835, "usage_type": "call"}]} {"seq_id": "43634588065", "text": "import sys\nfrom typing import List\n\nimport testing.test_data\nfrom roland import DataBlock, RolandData, GenericRoland, GenericRolandWithBackwardCompatibility\nfrom Roland_JV1080 import jv_1080\nfrom Roland_JV80 import jv_80\n\nthis_module = sys.modules[__name__]\n\n# XV-3080 and XV-5080. But the XV-5080 has these Patch Split Key messages as well!? 
We can ignore them?\n_xv3080_patch_data = [DataBlock((0x00, 0x00, 0x00, 0x00), 0x4f, \"Patch common\"),\n DataBlock((0x00, 0x00, 0x02, 0x00), (0x01, 0x11), \"Patch common MFX\"),\n DataBlock((0x00, 0x00, 0x04, 0x00), 0x34, \"Patch common Chorus\"),\n DataBlock((0x00, 0x00, 0x06, 0x00), 0x53, \"Patch common Reverb\"),\n DataBlock((0x00, 0x00, 0x10, 0x00), 0x29, \"Patch common Tone Mix Table\"),\n DataBlock((0x00, 0x00, 0x20, 0x00), (0x01, 0x09), \"Tone 1\"),\n DataBlock((0x00, 0x00, 0x22, 0x00), (0x01, 0x09), \"Tone 2\"),\n DataBlock((0x00, 0x00, 0x24, 0x00), (0x01, 0x09), \"Tone 3\"),\n DataBlock((0x00, 0x00, 0x26, 0x00), (0x01, 0x09), \"Tone 4\")]\n_xv3080_edit_buffer_addresses = RolandData(\"XV-3080 Temporary Patch\", 1, 4, 4,\n (0x1f, 0x00, 0x00, 0x00),\n _xv3080_patch_data)\n_xv3080_program_buffer_addresses = RolandData(\"XV-3080 User Patches\", 128, 4, 4,\n (0x30, 0x00, 0x00, 0x00),\n _xv3080_patch_data)\n# This can be used as an alternative way to detect the XV-3080\n# _xv3080_system_common = RolandData(\"XV-3080 System Common\", 1, 4, 4, (0x00, 0x00, 0x00, 0x00),\n# [DataBlock((0x00, 0x00, 0x00, 0x00), 0x28, \"System common\")])\nxv_3080_main = GenericRoland(\"Roland XV-3080\",\n model_id=[0x00, 0x10],\n address_size=4,\n edit_buffer=_xv3080_edit_buffer_addresses,\n program_dump=_xv3080_program_buffer_addresses,\n category_index=0x0c,\n device_family=[0x10, 0x01]) # Interestingly, the XV-3080 seems the first model to support the generic device inquiry\nxv_3080 = GenericRolandWithBackwardCompatibility(xv_3080_main, [jv_80, jv_1080])\nxv_3080.install(this_module)\n\n\n# and XV-5080 and XV-5050?\n\n\ndef setupHelp():\n return \"Make sure the Receive Exclusive parameter (SYSTEM/COMMON) is ON, and the synth is set to Patch Mode\"\n\n\n# Test data picked up by test_adaptation.py\ndef make_test_data():\n def programs(data: testing.TestData) -> List[testing.ProgramTestData]:\n patch = []\n names = [\"RedPowerBass\", \"Sinus QSB\", \"Super W Bass\"]\n i = 0\n # Extract the first 3 programs from the sysex dump loaded, and yield them with name and number to the test code\n for message in data.all_messages:\n if xv_3080.isPartOfSingleProgramDump(message):\n patch.extend(message)\n if xv_3080.isSingleProgramDump(patch):\n yield testing.ProgramTestData(message=patch, name=names[i], number=i)\n patch = []\n i += 1\n if i >= len(names):\n break\n\n return testing.TestData(sysex=\"testData/jv1080_AGSOUND1.SYX\",\n program_generator=programs,\n program_dump_request=\"f0 41 10 00 10 11 30 00 00 00 00 00 00 4f 01 f7\",\n device_detect_call=\"f0 7e 00 06 01 f7\",\n device_detect_reply=(\"f0 7e 10 06 02 41 10 01 00 00 00 00 00 00 f7\", 0))\n", "repo_name": "christofmuc/KnobKraft-orm", "sub_path": "adaptations/Roland_XV3080.py", "file_name": "Roland_XV3080.py", "file_ext": "py", "file_size_in_byte": 3752, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 152, "dataset": "github-code", "pt": "46", "api": [{"api_name": "sys.modules", "line_number": 9, "usage_type": "attribute"}, {"api_name": "roland.DataBlock", "line_number": 12, "usage_type": "call"}, {"api_name": "roland.DataBlock", "line_number": 13, "usage_type": "call"}, {"api_name": "roland.DataBlock", "line_number": 14, "usage_type": "call"}, {"api_name": "roland.DataBlock", "line_number": 15, "usage_type": "call"}, {"api_name": "roland.DataBlock", "line_number": 16, "usage_type": "call"}, {"api_name": "roland.DataBlock", "line_number": 17, "usage_type": "call"}, {"api_name": "roland.DataBlock", "line_number": 18, "usage_type": 
"call"}, {"api_name": "roland.DataBlock", "line_number": 19, "usage_type": "call"}, {"api_name": "roland.DataBlock", "line_number": 20, "usage_type": "call"}, {"api_name": "roland.RolandData", "line_number": 21, "usage_type": "call"}, {"api_name": "roland.RolandData", "line_number": 24, "usage_type": "call"}, {"api_name": "roland.GenericRoland", "line_number": 30, "usage_type": "call"}, {"api_name": "roland.GenericRolandWithBackwardCompatibility", "line_number": 37, "usage_type": "call"}, {"api_name": "Roland_JV80.jv_80", "line_number": 37, "usage_type": "name"}, {"api_name": "Roland_JV1080.jv_1080", "line_number": 37, "usage_type": "name"}, {"api_name": "testing.test_data.TestData", "line_number": 50, "usage_type": "attribute"}, {"api_name": "testing.test_data", "line_number": 50, "usage_type": "name"}, {"api_name": "testing.test_data.ProgramTestData", "line_number": 59, "usage_type": "call"}, {"api_name": "testing.test_data", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 50, "usage_type": "name"}, {"api_name": "testing.test_data.ProgramTestData", "line_number": 50, "usage_type": "attribute"}, {"api_name": "testing.test_data.TestData", "line_number": 65, "usage_type": "call"}, {"api_name": "testing.test_data", "line_number": 65, "usage_type": "name"}]} {"seq_id": "30958864135", "text": "import tensorflow as tf\nfrom keras.models import Model\n\nuser_NN = tf.keras.models.Sequential([\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(32)\n])\n\nitem_NN = tf.keras.models.Sequential([\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(32)\n])\n\nnum_user_features = 10\nnum_item_features = 10\n\n# create the user input and point to the base network\nuser_input = tf.keras.layers.Input(shape=(num_user_features))\nvu = user_NN(user_input)\nvu = tf.linalg.l2_normalize(vu, axis=1)\n\n# create the item input and point to the base network\nitem_input = tf.keras.layers.Input(shape=(num_item_features))\nvm = item_NN(item_input)\nvm = tf.linalg.l2_normalize(vm, axis=1)\n\n# measure the similarity between the user and item embeddings\noutput = tf.keras.layers.Dot(axes=1)([vu, vm])\n\n# specify the inputs and output of the model\nmodel = Model(inputs=[user_input, item_input], outputs=output)\n\n# specify the cost function and optimization strategy\ncost_fn = tf.keras.losses.MeanSquaredError()", "repo_name": "ali-izhar/machine-learning", "sub_path": "Theory/Deep_Learning/Recommender_Systems/Content_Based/content_recommender.py", "file_name": "content_recommender.py", "file_ext": "py", "file_size_in_byte": 1134, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "46", "api": [{"api_name": "tensorflow.keras.models.Sequential", "line_number": 4, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 4, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 5, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 5, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 6, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 7, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 7, "usage_type": "attribute"}, {"api_name": 
"tensorflow.keras.models.Sequential", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 11, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.linalg.l2_normalize", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.linalg", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.linalg.l2_normalize", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.linalg", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dot", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 30, "usage_type": "attribute"}, {"api_name": "keras.models.Model", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.MeanSquaredError", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 36, "usage_type": "attribute"}]} {"seq_id": "11519581908", "text": "import json\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List\n\nimport github_action_utils as gha_utils\nimport requests\nimport yaml\nfrom dataclass_wizard import YAMLWizard\n\n\n@dataclass\nclass Specification:\n \"\"\"Represents an OpenAPI specification.\"\"\"\n id: str\n label: str\n url: str\n\n\n@dataclass\nclass Organization:\n \"\"\"Represents an organization that provides OpenAPI specifications.\"\"\"\n id: str\n label: str\n specifications: List[Specification]\n\n\n@dataclass\nclass SpecificationsFile(YAMLWizard):\n \"\"\"Represents a file containing a list of organizations with their OpenAPI specifications.\"\"\"\n organizations: List[Organization]\n\n\ndef download_specification(url: str) -> str:\n \"\"\"Downloads an OpenAPI specification from a given URL and returns its content.\"\"\"\n try:\n response = requests.get(url)\n response.raise_for_status()\n return response.text\n except requests.exceptions.RequestException as e:\n raise ValueError(f\"Failed to download specification from {url}: {e}\")\n\n\ndef parse_specification(spec: str) -> str:\n \"\"\"Parses a given OpenAPI specification content and returns it in JSON format.\"\"\"\n try:\n parsed_spec = yaml.safe_load(spec)\n return json.dumps(parsed_spec, indent=4)\n except yaml.YAMLError as e:\n raise ValueError(f\"Failed to parse specification: {e}\")\n\n\ndef save_specification(spec: str, filepath: Path) -> None:\n \"\"\"Saves a given OpenAPI specification content to a file at the given path.\"\"\"\n try:\n filepath.parent.mkdir(parents=True, exist_ok=True)\n filepath.write_text(spec)\n except OSError as e:\n raise ValueError(f\"Failed to save specification to {filepath}: {e}\")\n\n\ndef 
download_format_and_save_specs(organizations: List[Organization]) -> None:\n \"\"\"\n Downloads, formats and saves OpenAPI specifications for the given organizations.\n\n For each organization and its specifications, the function downloads the content\n of each specification, parses it and saves it in JSON format to a file with a name\n that includes the organization and specification IDs. If any errors occur during this\n process, a warning is logged.\n \"\"\"\n for organization in organizations:\n for spec in organization.specifications:\n try:\n spec_content = download_specification(spec.url)\n validated_spec = parse_specification(spec_content)\n\n path = Path('../../') / 'openapi' / f'{organization.id}_{spec.id}.json'\n save_specification(validated_spec, path)\n\n gha_utils.debug(f\"Saved specification for {organization.label}: {spec.label} ({path})\")\n except (ValueError, requests.exceptions.RequestException, OSError) as exp:\n with gha_utils.group(\"Warnings while downloading, formatting and saving OpenApi specs\"):\n gha_utils.warning(str(exp), title=\"Warning\")\n\n\nif __name__ == \"__main__\":\n # Load the OpenAPI specifications file and extract the list of organizations.\n specifications_yaml_file = '../../specifications.json'\n parsed_specification_file = SpecificationsFile.from_yaml_file(specifications_yaml_file)\n\n # Download, format and save the OpenAPI specifications for the organizations.\n download_format_and_save_specs(parsed_specification_file.organizations)\n", "repo_name": "ivpk/api-specifications", "sub_path": "scripts/openapi/openapi.py", "file_name": "openapi.py", "file_ext": "py", "file_size_in_byte": 3384, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "dataclasses.dataclass", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 25, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 20, "usage_type": "name"}, {"api_name": "dataclass_wizard.YAMLWizard", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 31, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 28, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 40, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 47, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "yaml.YAMLError", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 62, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 77, "usage_type": "call"}, {"api_name": "github_action_utils.debug", "line_number": 80, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 81, "usage_type": "attribute"}, {"api_name": "github_action_utils.group", "line_number": 82, "usage_type": "call"}, {"api_name": "github_action_utils.warning", "line_number": 83, "usage_type": "call"}]} {"seq_id": "41994451883", "text": "import torch\nimport numpy as np\n\ndef get_uvcoords(H, W):\n i, j = np.meshgrid(np.linspace(0, W - 1, W), np.linspace(0, H - 1, H))\n dx = 1 / W\n dy = 1 / H\n for x in range(W):\n i[:, x] = dx / 2 + dx * x\n for y in range(H):\n j[y, :] = dy / 2 + dy * y\n\n j = np.flipud(j)\n uvs = np.stack((i, j), axis=2).reshape(H * W, 2)\n return uvs\n\ndef 
img2flat(img):\n \"\"\"\n [H,W,C] to [H*W,C] from row top down\n :param img:\n :return:\n \"\"\"\n rows=torch.chunk(img,len(img))\n flat=torch.cat(rows,dim=1)\n return flat.squeeze()\n\ndef flat2img(flat,H):\n \"\"\"\n [H*W,C] to [H,W,C] follow row top down rule\n :param flat:\n :return:\n \"\"\"\n rows=torch.chunk(flat,H)\n img=torch.stack(rows)\n return img\n\n\n# img=torch.tensor(np.arange(75).reshape((5,5,3)))\n# print(img)\n# print(flat2img(img2flat(img),len(img)))\n#\n# ten=torch.Tensor(np.arange(25*3).reshape(25,3))\n# # print(ten)\n# # rows=torch.chunk(ten,5,dim=0)\n# # img=torch.stack(rows)\n# # print(img.shape)\n# # img.transpose_(0,2) # hwc-cwh\n# # img.transpose_(1,2) # cwh-chw\n# # print(img.shape)\n# print(ten.data)\n\nten1=torch.tensor(np.arange(15).reshape(5,3))\nten2=torch.tensor(np.arange(15).reshape(5,3)+1)\nten3=torch.tensor(np.arange(15).reshape(5,3)+2)\nlis=[ten1,ten2,ten3]\nten4=torch.cat(lis,dim=0)\nprint(ten4)\n", "repo_name": "mudimingquedeyinmoujia/python_learning", "sub_path": "torch_test/chunk.py", "file_name": "chunk.py", "file_ext": "py", "file_size_in_byte": 1316, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "numpy.meshgrid", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.chunk", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.chunk", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 56, "usage_type": "call"}]} {"seq_id": "17038517718", "text": "#!/usr/bin/env python\n\nfrom datetime import datetime\nimport re\nimport sys\n\ndef mapper():\n\ttld = \"http://www.theassociates.co.uk\"\n\tpattern = '^([\\d.]+) ([\\w-]+) ([\\w-]+) \\[(.+)\\] \\\"(.+)\\\" (\\d{3}) (\\d+)$'\n\tfor line in sys.stdin:\n\t\tresult = re.match(pattern, line)\n\t\tif result is None:\n\t\t\tcontinue\n\t\ttime_str, request = result.group(4), result.group(5)\n\t\ttry:\n\t\t\tmethod, resource, protocol = request.split(\" \")\n\t\texcept ValueError:\n\t\t\tcontinue\n\n\t\t# Need to convert to ordinal because we want to sort by day\n\t\ttime_dt = datetime.strptime(time_str.split(\" \")[0], \"%d/%b/%Y:%X\")\n\t\ttime_ordinal = time_dt.toordinal()\n\n\t\tif resource.startswith(tld):\n\t\t\tresource = resource[len(tld):]\n\n\t\tif resource == \"/\":\n\t\t\tprint(\"{}\\t1\".format(time_ordinal))\n\nif __name__ == \"__main__\":\n\tmapper()\n", "repo_name": "yahwang/Learn-Big-Data-Essentials-Yandex", "sub_path": "data/mapreduce_test/mapper.py", "file_name": "mapper.py", "file_ext": "py", "file_size_in_byte": 776, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "46", "api": [{"api_name": "sys.stdin", "line_number": 10, "usage_type": 
"attribute"}, {"api_name": "re.match", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}]} {"seq_id": "7913187534", "text": "import logging\nimport uuid\n\nfrom django.core import mail\nfrom django.test import TestCase\n\nfrom classroom.factories import ClassroomFactory, UserFactory\nfrom classroom.forms import EnrollmentForm, PostForm\n\n\nclass EnrollmentFormTests(TestCase):\n def test_valid_enrollment_form_with_new_enrollment_sends_email(self):\n student = UserFactory()\n classroom = ClassroomFactory()\n\n code = str(classroom.id)\n form = EnrollmentForm({'code': code})\n\n self.assertTrue(form.is_valid())\n\n with self.assertLogs('classroom.forms', level='INFO') as cm:\n form.send_mail(student=student, classroom=classroom)\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, 'Site message')\n self.assertGreaterEqual(len(cm.output), 1)\n\n def test_invalid_enrollment_form_does_not_sends_email(self):\n form = EnrollmentForm({'code': '123456'})\n self.assertFalse(form.is_valid())\n\n\nclass PostFormTests(TestCase):\n def test_valid_post_form_works(self):\n form = PostForm({'title': 'A new beginning', 'content': 'We have to start over again'})\n self.assertTrue(form.is_valid())\n\n def test_invalid_post_form(self):\n form = PostForm({'title': 'A new beginning'})\n self.assertFalse(form.is_valid())\n", "repo_name": "gurupratap-matharu/cities", "sub_path": "classroom/tests/test_forms.py", "file_name": "test_forms.py", "file_ext": "py", "file_size_in_byte": 1315, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "django.test.TestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "classroom.factories.UserFactory", "line_number": 13, "usage_type": "call"}, {"api_name": "classroom.factories", "line_number": 14, "usage_type": "name"}, {"api_name": "classroom.factories.ClassroomFactory", "line_number": 14, "usage_type": "call"}, {"api_name": "classroom.factories.id", "line_number": 16, "usage_type": "attribute"}, {"api_name": "classroom.factories", "line_number": 16, "usage_type": "name"}, {"api_name": "classroom.forms.EnrollmentForm", "line_number": 17, "usage_type": "call"}, {"api_name": "classroom.factories", "line_number": 22, "usage_type": "name"}, {"api_name": "django.core.mail.outbox", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.core.mail", "line_number": 24, "usage_type": "name"}, {"api_name": "django.core.mail.outbox", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.core.mail", "line_number": 25, "usage_type": "name"}, {"api_name": "classroom.forms.EnrollmentForm", "line_number": 29, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 33, "usage_type": "name"}, {"api_name": "classroom.forms.PostForm", "line_number": 35, "usage_type": "call"}, {"api_name": "classroom.forms.PostForm", "line_number": 39, "usage_type": "call"}]} {"seq_id": "14809632057", "text": "\"\"\"\nImplementations for several probabilistic error models\n\"\"\"\n\nfrom typing import Optional, Tuple\n\nimport numpy as np\n\n\ndef single_dist_mc(\n emap: np.ndarray,\n x_dist: np.ndarray,\n w_dist: np.ndarray,\n fan_in: float,\n num_samples: int = int(1e5),\n) -> Tuple[float, float]:\n \"\"\"\n Generate error mean and standard deviation using Monte Carlo\n approach as described in: 
https://arxiv.org/abs/1912.00700\n\n Args:\n emap: The multiplier's error map\n x_dist: Operand distribution of activations\n w_dist: Operand distribution of weights\n fan_in: Incoming connections for layer\n num_samples: Number of Monte Carlo simulation runs. Defaults to int(1e5).\n\n Returns:\n Mean and standard deviation for a single operation\n \"\"\"\n prob_x, prob_w = np.meshgrid(\n x_dist.astype(np.float64), w_dist.astype(np.float64), indexing=\"ij\"\n )\n probabilities = (prob_x * prob_w).flatten()\n emap = emap.flatten()\n monte_carlo_runs = np.random.choice(\n emap, size=(num_samples, fan_in), p=probabilities\n )\n monte_carlo_runs = np.sum(monte_carlo_runs, axis=1)\n return (\n np.mean(monte_carlo_runs) / fan_in,\n np.std(monte_carlo_runs, dtype=np.float64) / fan_in,\n )\n\n\ndef error_prediction(\n emap: np.ndarray, x_dist: np.ndarray, w_dist: np.ndarray, fan_in: float\n) -> Tuple[float, float]:\n \"\"\"\n Generate error mean and standard deviation using the\n global distribution of activations and weights\n\n Args:\n emap: The multiplier's error map\n x_dist: Operand distribution of activations\n w_dist: Operand distribution of weights\n fan_in: Incoming connections for layer\n\n Returns:\n Mean and standard deviation for a single operation\n \"\"\"\n emap = emap.astype(np.float64)\n prob_x, prob_w = np.meshgrid(\n x_dist.astype(np.float64), w_dist.astype(np.float64), indexing=\"ij\"\n )\n mean = np.sum(emap * prob_x * prob_w)\n std = np.sqrt(np.sum(((emap - mean) ** 2) * prob_x * prob_w)) / np.sqrt(fan_in)\n return mean, std\n\n\ndef get_sample_population(tensor: np.ndarray, num_samples: int = 512) -> np.ndarray:\n \"\"\"\n Randomly select samples from a tensor that cover the receptive field of one neuron\n\n Args:\n tensor: Tensor to draw samples from\n num_samples: Number of samples to draw. Defaults to 512.\n\n Returns:\n Sampled 2D Tensor of shape [num_samples, tensor.shape[-1]]\n \"\"\"\n flat_dim = np.prod(tensor.shape[:-1])\n rand_idx = np.random.choice(flat_dim, num_samples)\n return tensor.reshape(flat_dim, tensor.shape[-1])[rand_idx]\n\n\ndef population_prediction(\n emap: np.ndarray, x_multidist: np.ndarray, w_dist: np.ndarray, fan_in: float\n) -> Tuple[float, float]:\n \"\"\"\n Generate prediction of mean and standard deviation using several\n sampled local distributions\n\n Args:\n emap: The multiplier's error map\n x_multidist: Array of several operand distributions for activations\n w_dist: Operand distribution of weights\n fan_in: Incoming connections for layer\n\n Returns:\n Mean and standard deviation for a single operation\n \"\"\"\n # Single distribution error computation for each operand distribution\n means, stds = [], []\n for x_dist in x_multidist:\n mean, std = error_prediction(emap, x_dist, w_dist, fan_in)\n means.append(mean)\n stds.append(std)\n npmeans = np.array(means)\n npstds = np.array(stds)\n\n # Aggregate error distributions (Eq. 15 & Eq. 16)\n mean_aggregate = np.mean(npmeans)\n std_aggregate = np.sqrt(\n (\n np.sum(npmeans**2 + npstds**2)\n - (np.sum(npmeans) ** 2) / x_multidist.shape[0]\n )\n / x_multidist.shape[0]\n )\n return mean_aggregate, std_aggregate\n\n\ndef to_distribution(\n tensor: Optional[np.ndarray], min_val: int, max_val: int\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Turn tensor of weights/activations into a frequency distribution (i.e. 
build a histogram)\n\n Args:\n tensor: Tensor to build histogram from\n min_val: Lowest possible operand value in tensor\n max_val: Highest possible operand value in tensor\n\n Returns:\n Tuple of Arrays where first array contains the full numerical range between\n min_val and max_val inclusively and second array contains the relative frequency\n of each operand\n\n Raises:\n ValueError: If run before features maps have been populated\n by call to `utils.model.get_feature_maps`\n\n \"\"\"\n if tensor is None:\n raise ValueError(\"Populate input tensor with intermediate features maps\")\n num_range = np.arange(min_val, max_val + 1)\n counts = np.zeros_like(num_range)\n nums, freqs = np.unique(tensor.flatten().astype(np.int32), return_counts=True)\n counts[nums - min_val] = freqs.astype(np.float64)\n counts = counts / np.sum(freqs)\n return num_range, counts\n\n\ndef error_calculation(\n accurate: np.ndarray, approximate: np.ndarray, fan_in: float\n) -> Tuple[float, float]:\n \"\"\"\n Calculate mean and standard deviation of the observed error between\n accurate computation and approximate computation\n\n Args:\n accurate: Accurate computation results\n approximate: Approximate computation results\n fan_in: Number of incoming neuron connections\n\n Returns:\n Mean and standard deviation for a single operation\n \"\"\"\n mean = np.mean(accurate - approximate) / fan_in\n std = np.std((accurate - approximate) / fan_in, dtype=np.float64)\n return mean, std\n", "repo_name": "etrommer/agn-approx", "sub_path": "src/agnapprox/utils/error_stats.py", "file_name": "error_stats.py", "file_ext": "py", "file_size_in_byte": 5585, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "46", "api": [{"api_name": "numpy.ndarray", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.meshgrid", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 42, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.meshgrid", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 67, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 48, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.prod", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 83, "usage_type": "attribute"}, {"api_name": 
"numpy.ndarray", "line_number": 88, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 117, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 89, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 148, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 150, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 126, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 126, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 155, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 170, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 156, "usage_type": "name"}]} {"seq_id": "25457012513", "text": "\"\"\"This is the main file for icarus\"\"\"\n\nimport os\nimport curses\nimport sys\nimport configparser # https://docs.python.org/3/library/configparser.html\nfrom multiprocessing import Process\nimport aiosmtpd.smtp\n\n# Below are my functions.\nfrom app.smtp import startsmtp\nfrom app.editor import editor\nfrom app.udp import runudp\nfrom app.tcp import runtcp\nfrom app.ftp import ftpserver\nfrom app.abuseipdb import largfeed\nimport app.cfg\n\n\n# pylint: disable=R0801\nconfig = configparser.ConfigParser()\nconfig.read('icarus.config')\nsmtpport = config['ADDRESSES']['SMTPPort']\nabuseip = config['IPDBAPI']['AbuseIPDB']\nabuseapikey = config['IPDBAPI']['IPDBAPI']\nvtapikey = config['APIKEY']['apikey']\nvirustotal = config['APIKEY']['Virustotal']\nsyslogenable = config['SYSLOG']['Syslog']\nsyslogip = config['SYSLOG']['IP']\nsyslogport = config['SYSLOG']['PORT']\nlargfeedon = config['LARGFEED']['Largfeed']\nlargfeedserver = config['LARGFEED']['Server']\nlargfeedport = config['LARGFEED']['Port']\ntcpports = config['PORTS']['tcpports']\nudpports = config['PORTS']['udpports']\n\naiosmtpd.smtp.__ident__ = \"Microsoft ESMTP MAIL Service\"\n\n\n# pylint: disable=R0915, W0613\ndef main(window):\n \"\"\"MAIN!\"\"\"\n # Starting SMTP Service\n process2 = Process(name='smtp', target=startsmtp, daemon=True)\n process2.start()\n # startsmtp()\n # Starting FTP Service\n process1 = Process(name='Ftp', target=ftpserver, daemon=True)\n process1.start()\n # Largfeed Queue processor\n if largfeedon != \"no\":\n process3 = Process(name='largfeed', target=largfeed, daemon=True)\n process3.start()\n\n # Dynamic low interaction port services.\n\n for tcpport in tcpports.replace(\" \", \"\").split(','):\n dyntcpprocess = Process(name='DynamicTCP ' + str(tcpport), target=runtcp, daemon=True, args=(int(tcpport),))\n 
dyntcpprocess.start()\n\n for udpport in udpports.replace(\" \", \"\").split(','):\n dynudpprocess = Process(name='DynamicUDP ' + str(udpport), target=runudp, daemon=True, args=(int(udpport),))\n dynudpprocess.start()\n\n while True:\n scurses = curses.initscr()\n curses.curs_set(0)\n curses.noecho()\n curses.napms(500)\n # Pretty standard configs. I have the curses refresh set to 3 seconds.\n # https://docs.python.org/3.5/library/curses.html#module-curses\n curses.start_color()\n curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_CYAN, curses.COLOR_BLACK)\n curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)\n sheight, swidth = scurses.getmaxyx()\n cursewinder = curses.newwin(sheight, swidth, 0, 0)\n cursewinder.nodelay(True)\n # No delay fixes a problem of the screen not updating properly.\n\n # the above 5 are just standard curses commands.\n # First number is vertical, 51 is horizontal\n cursewinder.addstr(0, 51, \"Icarus.config\")\n cursewinder.addstr(1, 51, \"Virustotal:\")\n cursewinder.addstr(2, 51, \"Enabled: \" + virustotal)\n cursewinder.addstr(3, 51, \"APIKEY: \" + vtapikey)\n cursewinder.addstr(5, 51, \"AbuseIPDB:\")\n cursewinder.addstr(6, 51, \"Enabled: \" + abuseip)\n cursewinder.addstr(7, 51, \"APIKEY: \" + abuseapikey)\n cursewinder.addstr(9, 51, \"Syslog:\")\n cursewinder.addstr(10, 51, \"Enabled: \" + syslogenable)\n cursewinder.addstr(11, 51, \"Syslog Server: \" + syslogip + \":\" + syslogport)\n cursewinder.addstr(13, 51, \"LARGfeed:\")\n cursewinder.addstr(14, 51, \"Enabled: \" + largfeedon)\n cursewinder.addstr(15, 51, \"LARGfeed Server: \" + largfeedserver + \":\" + largfeedport)\n cursewinder.addstr(17, 51, \"Press P to change values.\", curses.color_pair(2))\n cursewinder.addstr(18, 51, \"Press R to restart.\", curses.color_pair(3))\n cursewinder.addstr(19, 51, \"Press Q to quit.\", curses.color_pair(1))\n\n cursewinder.addstr(0, 0, \"ICARUS HONEYPOT\", curses.color_pair(1))\n\n cursewinder.addstr(12, 0, \"Attacks: \" + str(app.cfg.numattacks['num']))\n cursewinder.addstr(13, 0, \"Last 5 Attackers: \", curses.color_pair(3))\n if app.cfg.attackers:\n for num, address in enumerate(app.cfg.attackers, start=1):\n cursewinder.addstr((num + 13), 0, str(address))\n\n cursewinder.refresh()\n\n key = cursewinder.getch()\n if key == ord('q'):\n break\n if key == ord('r'):\n process1.terminate()\n process2.terminate()\n os.execv(sys.executable, ['python3'] + sys.argv)\n # Nice little thing that restarts a python script.\n elif key == ord('p'):\n editor() # from editor.py, opens your system editor.\n cursewinder.erase()\n cursewinder.refresh()\n\n\nif __name__ == '__main__':\n curses.wrapper(main)\n", "repo_name": "tbiens/icarus", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 4836, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "46", "api": [{"api_name": "configparser.ConfigParser", "line_number": 21, "usage_type": "call"}, {"api_name": "aiosmtpd.smtp.smtp", "line_number": 37, "usage_type": "attribute"}, {"api_name": "aiosmtpd.smtp", "line_number": 37, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 44, "usage_type": "call"}, {"api_name": "app.smtp.startsmtp", "line_number": 44, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 48, "usage_type": "call"}, {"api_name": "app.ftp.ftpserver", "line_number": 48, "usage_type": "name"}, {"api_name": "multiprocessing.Process", 
"line_number": 52, "usage_type": "call"}, {"api_name": "app.abuseipdb.largfeed", "line_number": 52, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 58, "usage_type": "call"}, {"api_name": "app.tcp.runtcp", "line_number": 58, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 62, "usage_type": "call"}, {"api_name": "app.udp.runudp", "line_number": 62, "usage_type": "name"}, {"api_name": "curses.initscr", "line_number": 66, "usage_type": "call"}, {"api_name": "curses.curs_set", "line_number": 67, "usage_type": "call"}, {"api_name": "curses.noecho", "line_number": 68, "usage_type": "call"}, {"api_name": "curses.napms", "line_number": 69, "usage_type": "call"}, {"api_name": "curses.start_color", "line_number": 72, "usage_type": "call"}, {"api_name": "curses.init_pair", "line_number": 73, "usage_type": "call"}, {"api_name": "curses.COLOR_RED", "line_number": 73, "usage_type": "attribute"}, {"api_name": "curses.COLOR_BLACK", "line_number": 73, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 74, "usage_type": "call"}, {"api_name": "curses.COLOR_CYAN", "line_number": 74, "usage_type": "attribute"}, {"api_name": "curses.COLOR_BLACK", "line_number": 74, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 75, "usage_type": "call"}, {"api_name": "curses.COLOR_GREEN", "line_number": 75, "usage_type": "attribute"}, {"api_name": "curses.COLOR_BLACK", "line_number": 75, "usage_type": "attribute"}, {"api_name": "curses.newwin", "line_number": 77, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 96, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 97, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 98, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 100, "usage_type": "call"}, {"api_name": "app.smtp.cfg", "line_number": 102, "usage_type": "attribute"}, {"api_name": "app.smtp", "line_number": 102, "usage_type": "name"}, {"api_name": "curses.color_pair", "line_number": 103, "usage_type": "call"}, {"api_name": "app.smtp.cfg", "line_number": 104, "usage_type": "attribute"}, {"api_name": "app.smtp", "line_number": 104, "usage_type": "name"}, {"api_name": "app.smtp.cfg", "line_number": 105, "usage_type": "attribute"}, {"api_name": "app.smtp", "line_number": 105, "usage_type": "name"}, {"api_name": "os.execv", "line_number": 116, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 116, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 116, "usage_type": "attribute"}, {"api_name": "app.editor.editor", "line_number": 119, "usage_type": "call"}, {"api_name": "curses.wrapper", "line_number": 125, "usage_type": "call"}]} {"seq_id": "43807990703", "text": "import socket\nimport threading\nimport hashlib\nimport time\nimport datetime\nimport random\nimport sys\n\n\n# Packet class definition\nclass packet():\n checksum = 0\n length = 0\n seqNo = 0\n msg = 0\n\n def make(self, data):\n self.msg = data\n self.length = str(len(data))\n self.checksum=hashlib.sha1(data.encode('utf-8')).hexdigest()\n print (\"Length: %s\\nSequence number: %s\" %(self.length, self.seqNo))\n\ndef RUDPServer(serverAddress, serverPort):\n # Start - Connection initiation\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # Bind the socket to the port\n server_address = (serverAddress, serverPort)\n print ('Starting up on %s port %s' % server_address)\n sock.bind(server_address)\n\n # Listening for requests 
indefinitely\n    while True:\n        print ('Waiting to receive message')\n        data, address = sock.recvfrom(600)\n        # Delimiter\n        delimiter = \"|:|:|\"\n\n        # Seq number flag\n        seqFlag = 0\n\n        packet_count=0\n        time.sleep(0.5)\n        start_time=time.time()\n        print (\"Request started at: \" + str(datetime.datetime.utcnow()))\n        # Initialise packet class\n        pkt = packet()\n        threadSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n        startTime=time.time()\n\n        # Check if file is valid\n        try:\n            print (\"Opening file %s\" % data)\n            fileName = data.decode('utf-8')\n            fileRead = open(fileName, 'r')\n            data = fileRead.read()\n            fileRead.close()\n        except:\n            msg=\"FNF\"\n            pkt.make(msg)\n            finalPacket = str(pkt.checksum) + delimiter + str(pkt.seqNo) + delimiter + str(pkt.length) + delimiter + pkt.msg\n            threadSock.sendto(finalPacket.encode('utf-8'), address)\n            print (\"Requested file could not be found, replied with FNF\")\n            continue  # keep the server alive for further requests\n\n        \n        # Fragment and send the file 500 bytes at a time\n        x = 0\n        expectedPacket = (int((len(data) / 500) + 1))\n        while (x < int((len(data) / 500) + 1)):\n            packet_count += 1\n            msg = data[x * 500:x * 500 + 500];\n            pkt.make(msg);\n            finalPacket = str(pkt.checksum) + delimiter + str(pkt.seqNo) + delimiter + str(pkt.length) + delimiter + pkt.msg\n\n            # Send packet\n            sent = threadSock.sendto(finalPacket.encode('utf-8'), address)\n            print ('Sent %s bytes back to %s, awaiting acknowledgment..' % (sent, address))\n            threadSock.settimeout(2)\n            try:\n                ack, address = threadSock.recvfrom(100);\n                ack = ack.decode('utf-8')\n            except:\n                # after timeout, resend the same packet\n                print (\"Time out reached, resending ...%s\" % x)\n                continue;\n            # Check if the acknowledgement matches the current sequence number\n            if ack.split(\",\")[0] == str(pkt.seqNo):\n                pkt.seqNo = int(not pkt.seqNo)\n                print (\"Acknowledged by: \" + ack + \"\\nAcknowledged at: \" + str(datetime.datetime.utcnow()) + \"\\nElapsed: \" + str(time.time() - start_time))\n                x += 1\n        endTime=time.time()\n        print(\"\\nDone within : \" + str(endTime-startTime))\n        packetLoss = expectedPacket - packet_count\n        print(\"Packet Loss : \"+ str(packetLoss))\n\n        print ('Received %s bytes from %s' % (len(data), address))\n    \nif __name__ == \"__main__\":\n    # Set address and port\n    serverAddress = sys.argv[1]\n    serverPort = int(sys.argv[2])\n    print(\"Server Address : \" + serverAddress + \"\\nServer Port : \"+ str(serverPort) +\"\\n\")\n    RUDPServer(serverAddress, serverPort)\n\n\n", "repo_name": "chenwenshu/multithreaded-downloader", "sub_path": "TypeOfReliableUDP/RUDP_server.py", "file_name": "RUDP_server.py", "file_ext": "py", "file_size_in_byte": 3637, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "46", "api": [{"api_name": "hashlib.sha1", "line_number": 20, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 25, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 25, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 25, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "time.time", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 47, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 47, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 47, "usage_type": "attribute"}, {"api_name": "time.time", "line_number":
48, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 89, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 89, "usage_type": "call"}, {"api_name": "time.time", "line_number": 91, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 100, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 101, "usage_type": "attribute"}]} {"seq_id": "23871332913", "text": "from pydantic.types import Json\nimport pytest\nfrom fastapi.testclient import TestClient\nfrom src.main import app\n\npytest.token = \"\"\n\n@pytest.fixture(scope=\"module\")\ndef client():\n with TestClient(app) as client:\n yield client\n\n@pytest.mark.order(1)\ndef test_create_user(client):\n user_mock = {\n \"name\": \"usertest\",\n \"email\": \"user.test@gmail.com\",\n \"password\": \"confidential\",\n \"cpf\": \"12345678900\",\n \"pis\": \"12345678900\",\n \"active\": True,\n \"address\": {\n \"country\": \"brasil\",\n \"state\": \"santa catarina\",\n \"city\": \"florianopolis\",\n \"complement\": \"ilha da magia\",\n \"street\": \"lauro linhares\",\n \"number\": \"123\",\n \"cep\": \"12345678\"\n }\n }\n response = client.post(\"/users\", json=user_mock)\n \n assert response.status_code == 201, response.text\n\n@pytest.mark.order(2)\ndef test_login(client):\n credentials = {\n \"username\": \"user.test@gmail.com\",\n \"password\": \"confidential\",\n }\n response = client.post(\"/login\", data=credentials, headers={\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n })\n \n assert response.status_code == 200, response.text\n pytest.token = \"Bearer \" + response.json()[\"access_token\"]\n\n@pytest.mark.order(3)\ndef test_get_users(client):\n response = client.get(\"/users\")\n assert response.status_code == 200, response.text\n\n@pytest.mark.order(4)\ndef test_get_user(client):\n response = client.get(\"/users/me\")\n assert response.status_code == 200, response.text\n\n@pytest.mark.order(5)\ndef test_put_user(client):\n updated_user = {\n \"name\": \"new usertest\",\n \"address\": {\n \"number\": \"1234\",\n }\n }\n response = client.put(\"/users/me\", json=updated_user)\n assert response.status_code == 200, response.text\n\n@pytest.mark.order(6)\ndef test_delete_user(client):\n response = client.delete(\"/users/me\")\n assert response.status_code == 200, response.text\n", "repo_name": "inafranco/user-crud", "sub_path": "back-end/src/tests/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 2024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "pytest.token", "line_number": 6, "usage_type": "attribute"}, {"api_name": "fastapi.testclient.TestClient", "line_number": 10, "usage_type": "call"}, {"api_name": "src.main.app", "line_number": 10, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 8, "usage_type": "call"}, {"api_name": "pytest.mark.order", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pytest.token", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pytest.mark.order", "line_number": 36, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pytest.mark.order", "line_number": 49, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 49, "usage_type": "attribute"}, {"api_name": 
"pytest.mark.order", "line_number": 54, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pytest.mark.order", "line_number": 59, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pytest.mark.order", "line_number": 70, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 70, "usage_type": "attribute"}]} {"seq_id": "2860350775", "text": "#coding=utf-8\nfrom base.models import BaseModel\nfrom django.db import models\n\nclass Questions_Model(BaseModel):\n project=models.CharField(u'所属项目',max_length=200,blank=False)\n question_desc=models.TextField(u'问题描述',blank=True)\n question_reason=models.TextField(u'产生问题原因',blank=True)\n question_module=models.CharField(u'问题所属模块',max_length=200,blank=False)\n question_reporter=models.CharField(u'提出问题人',max_length=200,blank=False)\n question_answer=models.CharField(u'问题跟进者',max_length=200,blank=False)\n question_progress=models.CharField(u'问题进度',max_length=200,blank=False)\n question_comments=models.CharField(u'问题备注',max_length=200,blank=False)\n is_system_cause=models.CharField(u'是否系统问题',max_length=200,blank=False)\n week=models.CharField(u'week',max_length=200,blank=False)\n\n class Meta:\n verbose_name = u'问题列表'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return '{project} {question_desc} {question_reason} {question_module} {question_reporter} {question_answer} {question_progress} {question_comments} {is_system_cause} {week}'.format(\n project=self.project,\n question_desc=self.question_desc,\n question_reason=self.question_reason,\n question_module=self.question_module,\n question_reporter=self.question_reporter,\n question_answer=self.question_answer,\n question_progress=self.question_progress,\n question_comments=self.question_comments,\n is_system_cause=self.is_system_cause,\n week=self.week\n )\n\nclass Project_Model(BaseModel):\n project_name=models.CharField(u'项目名称',max_length=200,blank=False)\n\n\nclass Module_Model(BaseModel):\n module_name=models.CharField(u'模块名称',max_length=200,blank=True)\n\nclass Progress_Model(BaseModel):\n progress_name=models.CharField(u'进度名称',max_length=200,blank=True)\n\n", "repo_name": "longyue123/tech", "sub_path": "tech/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2020, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "base.models.BaseModel", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": 
"django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "base.models.BaseModel", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "base.models.BaseModel", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "base.models.BaseModel", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}]} {"seq_id": "14989135656", "text": "from torchvision.models import resnet34\nimport torch.nn as nn\nimport torch\nimport streamlit as st\nfrom torchvision.transforms import transforms\nfrom torchvision.transforms import Compose\nfrom torchvision.transforms import ToTensor\nfrom torchvision.transforms import Resize\nimport numpy as np\n\nfrom torchvision.transforms import Compose, ToTensor, Resize\n\ndevice = torch.device(\"mps\")\ntransform = Compose([ToTensor(), Resize([150, 150])])\n\nf = transforms.Compose([\n # transforms.CenterCrop(300),\n transforms.RandomAffine(degrees=100, translate=(0.15, 0.15)),\n transforms.RandomGrayscale(p=0.2),\n transforms.Lambda(lambda x: x + torch.randn_like(x) * 0.03),\n transforms.Resize(150)\n ])\n\n@st.cache\ndef load_model(path):\n print(\"Loading model...\")\n model = resnet34()\n last_layer = model.fc\n model.fc = nn.Linear(last_layer.in_features, 2)\n model.load_state_dict(torch.load(path, map_location=device))\n model.to(device)\n return model\n\n\ndef predict(model, image):\n model.eval()\n\n transform = Compose([ToTensor(), Resize([150, 150])])\n\n image = transform(image)\n image = image.reshape([1, 3, 150, 150])\n output = model(image.to(device))\n _, predicted = torch.max(output.data, 1)\n return predicted\n\n\ndef compute_saliency_maps(X, y, model):\n model.eval()\n X = transform(X).reshape([1, 3, 150, 150])\n X.requires_grad_()\n\n saliency = None\n\n loss_function = nn.CrossEntropyLoss()\n output = model(X.to(device))\n loss = loss_function(output, y)\n loss.backward()\n\n saliency, _ = torch.max(torch.abs(X._grad), axis=1)\n return saliency.detach().numpy().reshape([150, 150])\n\ndef smoothing_loss(X):\n loss = 0.0\n loss += torch.sqrt(torch.mean(torch.pow(X[:, :, :, :-1]-X[:, :, :, 1:], 2)))\n loss += torch.sqrt(torch.mean(torch.pow(X[:, :, :, 1:]-X[:, :, :, :-1], 2)))\n loss += torch.sqrt(torch.mean(torch.pow(X[:, :, :-1, :]-X[:, :, 1:, :], 2)))\n loss += torch.sqrt(torch.mean(torch.pow(X[:, :, 1:, :]-X[:, :, :-1, :], 2)))\n return loss\n\n\ndef my_loss(output, y):\n return torch.sum(-1 / 10 * output[:, y]) # + torch.sum(output[:, 1-y])\n\n\ndef generate_images(X, y, model, lr, n):\n model.eval()\n\n 
X.requires_grad_()\n X_f = torch.stack([f(x) for x in X for y in range(n)]) # bs*n 150 150 3\n\n # loss_function = nn.CrossEntropyLoss()\n loss_function = my_loss\n # outputs = torch.stack([model(x.to(device) for x in X_f)]) # bs*n 2\n outputs = model(X_f.to(device)) # bs*n 2\n\n y_f = torch.stack([y_i for y_i in y for _ in range(n)]) # bs*n\n\n loss_main = loss_function(outputs, y_f) / n\n\n smoothing_loss_ = smoothing_loss(X)\n loss = loss_main + smoothing_loss_\n\n # y.shape: bs\n\n # bs*n 150 150 3\n\n loss.backward()\n # if randint(0, 20) == 20:\n X.requires_grad_(False)\n with torch.no_grad():\n X_new = X - lr * X.grad\n X.grad.zero_()\n\n difference = torch.sum(torch.abs(X_new - X))\n out_of_bound = torch.sum((X_new > 1) + (X_new < 0))\n\n print(\n loss_main.item(),\n smoothing_loss_.item(),\n difference,\n out_of_bound,\n # output.cpu().detach().numpy().tolist()\n )\n\n X_new[X_new < 0] = 0\n X_new[X_new > 1] = 1\n\n return X_new\n\n\ndef generate_image(x, y, model, lr, n):\n X_new = generate_images(x.unsqueeze(0), y, model, lr, n)\n return X_new[0]\n\ndef tweak_image(X, y, model):\n\n im = transform(X)\n\n n = 16\n lr = 0.3\n\n new_im = generate_image(im, y, model, lr, n)\n\n my_bar = st.progress(0)\n\n for i in range(80):\n my_bar.progress(i/80)\n new_im = generate_image(new_im, y, model, lr, n)\n\n new_numpy_im = new_im.detach().numpy().transpose(1, 2, 0)\n\n new_numpy_im[new_numpy_im < 0] = 0\n new_numpy_im[new_numpy_im > 1] = 1\n\n return new_numpy_im\n", "repo_name": "GSKW/ML", "sub_path": "practice/html/network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 3799, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.device", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.transforms.Resize", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.transforms.transforms.Compose", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.transforms.transforms", "line_number": 16, "usage_type": "name"}, {"api_name": "torchvision.transforms.transforms.RandomAffine", "line_number": 18, "usage_type": "call"}, {"api_name": "torchvision.transforms.transforms", "line_number": 18, "usage_type": "name"}, {"api_name": "torchvision.transforms.transforms.RandomGrayscale", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms.transforms", "line_number": 19, "usage_type": "name"}, {"api_name": "torchvision.transforms.transforms.Lambda", "line_number": 20, "usage_type": "call"}, {"api_name": "torchvision.transforms.transforms", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.randn_like", "line_number": 20, "usage_type": "call"}, {"api_name": "torchvision.transforms.transforms.Resize", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms.transforms", "line_number": 21, "usage_type": "name"}, {"api_name": "torchvision.models.resnet34", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 30, "usage_type": "call"}, {"api_name": "streamlit.cache", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", 
"line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms.Resize", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.max", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 105, "usage_type": "call"}, {"api_name": "streamlit.progress", "line_number": 134, "usage_type": "call"}]} {"seq_id": "12317841946", "text": "import json\nfrom numpy import array\n\nif __name__ == \"__main__\" :\n\n file=True\n with open(\"stickers_vectors_generation.json\") as file:\n generation = json.load(file)\n with open(\"main_data.json\") as file:\n data = json.load(file)\n\n corners = generation['C']\n edges = generation['E']\n faces = data['faces']\n\n print('{\\n \"C\": {')\n i = 0\n for corner in corners:\n i += 1\n val = array([0, 0, 0])\n for letter in corner:\n if letter != ' ':\n val += faces[letter]\n print(' \"' + str(i) + '\": [' + str(val[0]) + ', ' + str(val[1]) + ', ' + str(val[2]) + ('],' if i < 24 else ']'))\n\n print(' },\\n \"E\": {')\n i = 0\n for edge in edges:\n i += 1\n val = array([0, 0, 0])\n for letter in edge:\n if letter != ' ':\n val += faces[letter]\n print(' \"' + str(i) + '\": [' + str(val[0]) + ', ' + str(val[1]) + ', ' + str(val[2]) + ('],' if i < 24 else ']'))\n print(' }\\n}')\n", "repo_name": "juliengiraud/Rubik-s_Cube", "sub_path": "CircuitBuilder/stickersVectorsGeneration.py", "file_name": "stickersVectorsGeneration.py", "file_ext": "py", "file_size_in_byte": 1028, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "json.load", "line_number": 8, "usage_type": "call"}, {"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}]} {"seq_id": "9907747205", "text": "from django.test import TestCase, 
Client\n\nfrom account.models import User\nfrom care_point.forms import IllnessForm\nfrom care_point.models import Illness, Manager, Point_of_care\n\n\nclass IllnessTest(TestCase):\n def setUp(self):\n self.client = Client()\n\n Illness.objects.create(name='illness_1', description='desc_1').save()\n Illness.objects.create(name='illness_2', description='desc_2').save()\n Illness.objects.create(name='illness_3', description='desc_3').save()\n\n self.user = User.objects.create(username='m1', first_name='name_m1', last_name='sure_name_m1', is_manager=True,\n is_caregiver=False, password='123456Mp')\n self.user.save()\n point_of_care = Point_of_care.objects.create(city='Bstok')\n point_of_care.save()\n manager = Manager.objects.create(user=self.user, point_of_care=point_of_care)\n manager.save()\n\n def test_redirect_if_not_logged_in(self):\n response = self.client.get('/care_point/illness/')\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response.url, '/account/login/?next=/care_point/illness/')\n\n def test_should_return_all_objects_from_DB(self):\n self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['illness']), 3)\n\n def test_should_return_details_of_one_object(self):\n self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/1/')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context['illness'].name, 'illness_1')\n\n def test_should_return_status_code_404_if_object_not_exist(self):\n self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/5/')\n illnesses = Illness.objects.all()\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(len(illnesses), 3)\n\n def test_should_delete_one_object(self):\n self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/1/delete/')\n illnesses = Illness.objects.all()\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(len(illnesses), 2)\n\n def test_should_return_illness_form(self):\n self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/add/')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response.context['form']), IllnessForm)\n\n def test_should_add_one_object(self):\n self.client.force_login(user=self.user)\n response = self.client.post('/care_point/illness/add/', {'name': 'illness_4', 'description': 'desc4'})\n illnesses = Illness.objects.all()\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(len(illnesses), 4)\n\n def test_should_return_illness_update_form(self):\n self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/2/update/')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response.context['form']), IllnessForm)\n\n def test_should_update_object(self):\n self.client.force_login(user=self.user)\n response = self.client.post('/care_point/illness/2/update/', {'name': 'illness_update', 'description': 'desc_update'})\n updated_illness = Illness.objects.filter(pk=2).first()\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response.url, \"/care_point/illness/\")\n self.assertEqual(updated_illness.name, 'illness_update')\n self.assertEqual(updated_illness.description, 'desc_update')\n\n def test_should_not_update_object_when_parameter_is_wrong(self):\n self.client.force_login(user=self.user)\n 
response = self.client.post('/care_point/illness/2/update/', {'call': 'illness_update', 'description': 'desc_update'})\n updated_illness = Illness.objects.filter(pk=2).first()\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(updated_illness.name, 'illness_2')\n self.assertEqual(updated_illness.description, 'desc_2')\n", "repo_name": "mario-pe/CarePoint", "sub_path": "care_point/tests/test_illness.py", "file_name": "test_illness.py", "file_ext": "py", "file_size_in_byte": 4290, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "django.test.TestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "django.test.Client", "line_number": 10, "usage_type": "call"}, {"api_name": "care_point.models.Illness.objects.create", "line_number": 12, "usage_type": "call"}, {"api_name": "care_point.models.Illness.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "care_point.models.Illness", "line_number": 12, "usage_type": "name"}, {"api_name": "care_point.models.Illness.objects.create", "line_number": 13, "usage_type": "call"}, {"api_name": "care_point.models.Illness.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "care_point.models.Illness", "line_number": 13, "usage_type": "name"}, {"api_name": "care_point.models.Illness.objects.create", "line_number": 14, "usage_type": "call"}, {"api_name": "care_point.models.Illness.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "care_point.models.Illness", "line_number": 14, "usage_type": "name"}, {"api_name": "account.models.User.objects.create", "line_number": 16, "usage_type": "call"}, {"api_name": "account.models.User.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "account.models.User", "line_number": 16, "usage_type": "name"}, {"api_name": "care_point.models.Point_of_care.objects.create", "line_number": 19, "usage_type": "call"}, {"api_name": "care_point.models.Point_of_care.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "care_point.models.Point_of_care", "line_number": 19, "usage_type": "name"}, {"api_name": "care_point.models.Manager.objects.create", "line_number": 21, "usage_type": "call"}, {"api_name": "care_point.models.Manager.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "care_point.models.Manager", "line_number": 21, "usage_type": "name"}, {"api_name": "care_point.models.Illness.objects.all", "line_number": 47, "usage_type": "call"}, {"api_name": "care_point.models.Illness.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "care_point.models.Illness", "line_number": 47, "usage_type": "name"}, {"api_name": "care_point.models.Illness.objects.all", "line_number": 55, "usage_type": "call"}, {"api_name": "care_point.models.Illness.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "care_point.models.Illness", "line_number": 55, "usage_type": "name"}, {"api_name": "care_point.forms.IllnessForm", "line_number": 65, "usage_type": "argument"}, {"api_name": "care_point.models.Illness.objects.all", "line_number": 70, "usage_type": "call"}, {"api_name": "care_point.models.Illness.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "care_point.models.Illness", "line_number": 70, "usage_type": "name"}, {"api_name": "care_point.forms.IllnessForm", "line_number": 80, "usage_type": "argument"}, {"api_name": "care_point.models.Illness.objects.filter", "line_number": 85, 
"usage_type": "call"}, {"api_name": "care_point.models.Illness.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "care_point.models.Illness", "line_number": 85, "usage_type": "name"}, {"api_name": "care_point.models.Illness.objects.filter", "line_number": 95, "usage_type": "call"}, {"api_name": "care_point.models.Illness.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "care_point.models.Illness", "line_number": 95, "usage_type": "name"}]} {"seq_id": "20448119537", "text": "import pytest\nfrom pytest import approx\n\nfrom rules_engine import engine\n\n\n@pytest.mark.parametrize(\n \"avg_temp, balance_point, expected_result\",\n [\n (72, 60, 0), # outside hotter than balance point\n (60, 60, 0), # outside equal to balance point\n (57, 60, 3), # outside cooler than balance point\n ],\n)\ndef test_hdd(avg_temp, balance_point, expected_result):\n assert engine.hdd(avg_temp, balance_point) == expected_result\n\n\n@pytest.mark.parametrize(\n \"temps, expected_result\",\n [\n ([72, 60, 55, 61], 5), # one day with HDDs\n ([52, 60, 55], 13), # two days with HDDs\n ([72, 60, 65, 60, 80], 0), # no days with HDDs\n ],\n)\ndef test_period_hdd(temps, expected_result):\n assert engine.period_hdd(temps, 60) == expected_result\n\n\ndef test_average_indoor_temp():\n set_temp = 68\n setback = 62\n setback_hrs = 8\n\n # when there is no setback, just put 0 for the setback parameters\n assert engine.average_indoor_temp(set_temp, 0, 0) == set_temp\n assert engine.average_indoor_temp(set_temp, setback, setback_hrs) == 66\n\n\ndef test_bp_ua_estimates():\n home = engine.Home(\n engine.FuelType.GAS, heat_sys_efficiency=0.88, initial_balance_point=58\n )\n\n daily_temps_lists = [\n [28, 29, 30, 29],\n [32, 35, 35, 38],\n [41, 43, 42, 42],\n [72, 71, 70, 69],\n ]\n usages = [50, 45, 30, 0.96]\n inclusion_codes = [1, 1, 1, -1]\n home.initialize_billing_periods(daily_temps_lists, usages, inclusion_codes)\n home.calculate_avg_non_heating_usage()\n home.calculate_balance_point_and_ua()\n\n ua_1, ua_2, ua_3 = [bill.ua for bill in home.bills_winter]\n\n assert home.balance_point == 60\n assert ua_1 == approx(1450.5, abs=1)\n assert ua_2 == approx(1615.3, abs=1)\n assert ua_3 == approx(1479.6, abs=1)\n assert home.avg_ua == approx(1515.1, abs=1)\n assert home.stdev_pct == approx(0.0474, abs=0.01)\n\n\ndef test_bp_ua_with_outlier():\n home = engine.Home(\n engine.FuelType.GAS, heat_sys_efficiency=0.88, initial_balance_point=58\n )\n daily_temps_lists = [\n [41.7, 41.6, 32, 25.4],\n [28, 29, 30, 29],\n [32, 35, 35, 38],\n [41, 43, 42, 42],\n [72, 71, 70, 69],\n ]\n usages = [60, 50, 45, 30, 0.96]\n inclusion_codes = [1, 1, 1, 1, -1]\n home.initialize_billing_periods(daily_temps_lists, usages, inclusion_codes)\n home.calculate_avg_non_heating_usage()\n home.calculate_balance_point_and_ua()\n ua_1, ua_2, ua_3 = [bill.ua for bill in home.bills_winter]\n\n assert home.balance_point == 60\n assert ua_1 == approx(1450.5, abs=1)\n assert ua_2 == approx(1615.3, abs=1)\n assert ua_3 == approx(1479.6, abs=1)\n assert home.avg_ua == approx(1515.1, abs=1)\n assert home.stdev_pct == approx(0.0474, abs=0.01)\n", "repo_name": "codeforboston/home-energy-analysis-tool", "sub_path": "rules-engine/tests/test_rules_engine/test_engine.py", "file_name": "test_engine.py", "file_ext": "py", "file_size_in_byte": 2806, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "rules_engine.engine.hdd", "line_number": 16, "usage_type": 
"call"}, {"api_name": "rules_engine.engine", "line_number": 16, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 7, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 7, "usage_type": "attribute"}, {"api_name": "rules_engine.engine.period_hdd", "line_number": 28, "usage_type": "call"}, {"api_name": "rules_engine.engine", "line_number": 28, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 19, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 19, "usage_type": "attribute"}, {"api_name": "rules_engine.engine.average_indoor_temp", "line_number": 37, "usage_type": "call"}, {"api_name": "rules_engine.engine", "line_number": 37, "usage_type": "name"}, {"api_name": "rules_engine.engine.average_indoor_temp", "line_number": 38, "usage_type": "call"}, {"api_name": "rules_engine.engine", "line_number": 38, "usage_type": "name"}, {"api_name": "rules_engine.engine.Home", "line_number": 42, "usage_type": "call"}, {"api_name": "rules_engine.engine", "line_number": 42, "usage_type": "name"}, {"api_name": "rules_engine.engine.FuelType", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rules_engine.engine", "line_number": 43, "usage_type": "name"}, {"api_name": "pytest.approx", "line_number": 61, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 62, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 63, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 64, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 65, "usage_type": "call"}, {"api_name": "rules_engine.engine.Home", "line_number": 69, "usage_type": "call"}, {"api_name": "rules_engine.engine", "line_number": 69, "usage_type": "name"}, {"api_name": "rules_engine.engine.FuelType", "line_number": 70, "usage_type": "attribute"}, {"api_name": "rules_engine.engine", "line_number": 70, "usage_type": "name"}, {"api_name": "pytest.approx", "line_number": 87, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 88, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 89, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 90, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 91, "usage_type": "call"}]} {"seq_id": "11902651207", "text": "import requests\r\nimport openpyxl\r\n\r\n# Open a workbook and the active sheet\r\nwb = openpyxl.Workbook()\r\nws = wb.active\r\nws.title = \"Farmers Markets\"\r\n\r\ndef get_business_lists(city, last_row):\r\n print(city)\r\n\r\n # API Definition\r\n # my_API_Key = \"insert Yelp API key\"\r\n endPoint = \"https://api.yelp.com/v3/businesses/search\"\r\n api_headers = {'Authorization': 'bearer {}'.format(my_API_Key)}\r\n\r\n # Parameters\r\n offset = 0\r\n limit = 5\r\n total = 1000\r\n\r\n # Variable declarations\r\n biz_name = []\r\n biz_address = []\r\n biz_phone = []\r\n biz_reviews = []\r\n\r\n while offset < total:\r\n parameters = {\"term\": \"Farmers Markets\",\r\n \"limit\": limit,\r\n \"location\": city,\r\n \"sort_by\": \"rating\",\r\n \"offset\": offset}\r\n\r\n # Make the request and then convert the json to a dictionary\r\n businesses_json_response = requests.get(url=endPoint, params=parameters, headers=api_headers)\r\n businesses = businesses_json_response.json()\r\n\r\n # Update Total\r\n try:\r\n total = businesses[\"total\"]\r\n # print(len(businesses[\"businesses\"]))\r\n except:\r\n print(\"Total is less than {}\".format(limit))\r\n break\r\n\r\n # Append business names, 
addresses, phones, and reviews to their respective lists\r\n        for business in businesses[\"businesses\"]:\r\n            biz_name.append(business[\"name\"])\r\n            biz_address.append(business[\"location\"][\"address1\"])\r\n            biz_phone.append(business['display_phone'])\r\n            biz_reviews.append(business['review_count'])\r\n\r\n        # Write the collected data to Excel\r\n        last_row = print_to_excel(biz_name, biz_address, biz_phone, biz_reviews, city,last_row)\r\n\r\n        # print(\"Total = {}; Limit = {}; Offset = {}\".format(total, limit, offset))\r\n\r\n        if total < limit:\r\n            limit = total\r\n            offset += limit\r\n        elif total >= limit:\r\n            offset += limit\r\n\r\n    return last_row\r\n\r\n\r\ndef print_to_excel(names, addresses, phones, reviews, city, last_row):\r\n    # Write data to excel\r\n    elem = 0\r\n    while elem < len(names):\r\n        ws.cell(row = last_row + elem + 1, column = 1).value = names[elem]\r\n        ws.cell(row = last_row + elem + 1, column = 2).value = addresses[elem]\r\n        ws.cell(row = last_row + elem + 1, column = 3).value = phones[elem]\r\n        ws.cell(row = last_row + elem + 1, column = 4).value = city\r\n        ws.cell(row = last_row + elem + 1, column = 5).value = \"ID\"\r\n        ws.cell(row = last_row + elem + 1, column = 6).value = reviews[elem]\r\n        elem += 1\r\n\r\n    last_row = ws.max_row\r\n\r\n    return last_row\r\n\r\n\r\ndef main():\r\n\r\n    MSA_Columbus = (\r\n        \"Columbus\", \"Dublin\", \"Newark\", \"Delaware\", \"Lancaster\", \"Pickerington\", \"London\", \"Marysville\", \"Circleville\",\r\n        \"Marion\", \"Zanesville\", \"Chillicothe\", \"New Lexington\", \"Cambridge\", \"Washington Court House\")\r\n    MSA_Dayton = (\"Centerville\", \"Dayton\", \"Kettering\", \"Beavercreek\", \"Huber Heights\", \"Fairborn\", \"Miamisburg\",\r\n                  \"West Carrollton\", \"Springfield\", \"Urbana\", \"Greenville\", \"Sidney\")\r\n    MSA_Indianapolis = (\"Indianapolis\", \"Carmel\", \"Fishers\", \"Noblesville\", \"Greenwood\", \"Anderson\", \"Lawrence\",\r\n                        \"Westfield\", \"Plainfield\", \"Zionsville\", \"Brownsburg\", \"Franklin\", \"Greenfield\", \"Shelbyville\",\r\n                        \"Avon\", \"Lebanon\", \"Beech Grove\", \"Speedway\", \"Martinsville\",\r\n                        \"Greencastle\",\"Danville\", \"Mooresville\")\r\n\r\n    last_row = 0 # Keeps track of position in Excel\r\n    for city in MSA_Indianapolis:\r\n        last_row = get_business_lists(city, last_row)\r\n\r\n\r\n# BEGIN PROGRAM\r\nmain()\r\nwb.save(\"MSA_Indianapolis_Farmers_Markets.xlsx\")\r\n", "repo_name": "Mhauga/Extraction", "sub_path": "Extraction.py", "file_name": "Extraction.py", "file_ext": "py", "file_size_in_byte": 3790, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "46", "api": [{"api_name": "openpyxl.Workbook", "line_number": 5, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 36, "usage_type": "call"}]} {"seq_id": "33299107648", "text": "# Baekjoon: word study\nimport collections\n\nstring1 = input()\n\ndef strstudy(s):\n    s = s.upper()\n    string = []\n    max_dict = []\n    for i in s:\n        string += i\n    string_dict = collections.Counter(string)\n    for key, value in string_dict.items():\n        if value == max(string_dict.values()):\n            max_dict.append(key)\n    if len(max_dict) >= 2:\n        return \"?\"\n    else:\n        return max_dict[0]\na = strstudy(string1)\n\nprint(a)", "repo_name": "ohjooyeong/baekjoonalgo", "sub_path": "string/1157.py", "file_name": "1157.py", "file_ext": "py", "file_size_in_byte": 449, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "collections.Counter", "line_number": 12, "usage_type": 
"call"}]} {"seq_id": "25494823151", "text": "from bs4 import BeautifulSoup\nimport urllib2\nfrom random import choice,randint\nfrom pymongo import MongoClient\n\nurl = 'http://foodgawker.com/page/1/?s_exclude=drinks'\nrequest = urllib2.Request(url, headers={'User-Agent' : 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36'})\nsite = urllib2.urlopen(request)\nhtml = site.read()\n\nparsed_html = BeautifulSoup(html)\nmaxpage = int(parsed_html.body.find('div', attrs={'class' : 'post-section'}).attrs['data-maxpage'])\n\nfor x in xrange(1, maxpage):\n\n url = 'http://foodgawker.com/page/' + str(x) + '/?s_exclude=drinks'\n request = urllib2.Request(url, headers={'User-Agent' : 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36'})\n site = urllib2.urlopen(request)\n html = site.read()\n\n parsed_html = BeautifulSoup(html)\n\n dishes = parsed_html.body.find_all('div', attrs={'class' : 'flipwrapper'})\n\n client = MongoClient('localhost', 27017)\n db = client.recipeasy\n collection = db.recipes\n recipes = db.recipes\n\n for dish in dishes:\n img = dish.find('a', attrs={'class' : 'picture-link'})\n\n recipe = { 'title' : dish.attrs['data-sharetitle'],\n 'description' : dish.find('section', attrs={'class' : 'description'}).text,\n 'link' : img.attrs['href'],\n 'image' : img.find('img').attrs['src'] }\n\n recipes.insert(recipe)\n\n amtDone = float(x)/float(maxpage)\n print(\"\\rProgress: [{0:100s}] {1:.1f}% ({2}/{3})\".format('#' * int(amtDone * 50), amtDone * 100, x, maxpage)),", "repo_name": "aberle/recipeasy", "sub_path": "foodscrape.py", "file_name": "foodscrape.py", "file_ext": "py", "file_size_in_byte": 1643, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "46", "api": [{"api_name": "urllib2.Request", "line_number": 7, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 11, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 18, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 21, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 25, "usage_type": "call"}]} {"seq_id": "17196509488", "text": "from typing import List\nfrom collections import deque\n\nclass Solution:\n def numIslands(self, grid: List[List[str]], method='bfs') -> int:\n if method == 'union_joint':\n return self.numIslandsUnionJoint(grid)\n if method == 'dfs':\n return self.numIslandsDfs(grid)\n if method == 'bfs':\n return self.numIslandsBfs(grid)\n\n def numIslandsUnionJoint(self, grid):\n def find(x):\n nonlocal pa\n while x != pa[x]:\n x = pa[x]\n return x\n\n def union_joint(x, y, nr_groups):\n nonlocal pa\n nonlocal size\n\n root_x, root_y = find(x), find(y)\n if root_x == root_y:\n return nr_groups\n else:\n if size[root_x] > size[root_y]:\n pa[root_y] = root_x\n size[root_x] += size[root_y]\n else:\n pa[root_x] = root_y\n size[root_y] += root_x\n return nr_groups - 1\n\n m, n = len(grid), len(grid[0])\n nr_groups = m * n\n pa = [i for i in range(nr_groups)]\n size = [1] * nr_groups\n nr_land_cell = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1':\n nr_land_cell += 1\n for direction in [[1,0],[0,1]]:\n new_i = i + direction[0]\n new_j = j + direction[1]\n if new_i < m and new_j < n and 
grid[new_i][new_j] == '1':\n nr_groups = union_joint(i * n + j, new_i * n + new_j, nr_groups)\n return nr_groups - (n*m - nr_land_cell)\n\n\n def numIslandsDfs(self, grid):\n def dfs(i, j, m, n, visited):\n nonlocal grid\n visited[i][j] = 1\n for dir in [[-1, 0], [1, 0], [0, -1], [0, 1]]:\n new_i = i + dir[0]\n new_j = j + dir[1]\n if -1 < new_i < m and -1 < new_j < n:\n if visited[new_i][new_j] == 0 and grid[new_i][new_j] == '1':\n dfs(new_i, new_j, m, n, visited)\n\n m, n = len(grid), len(grid[0])\n visited = [[0] * n for _ in range(m)]\n nr_island = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '0':\n visited[i][j] = 1\n continue\n if visited[i][j] == 1:\n continue\n dfs(i, j, m, n, visited)\n nr_island += 1\n return nr_island\n\n def numIslandsBfs(self, grid):\n def bfs(x, y, m, n, visited):\n nonlocal grid\n cell_queue = deque([(x, y)])\n visited[x][y] = 1\n while cell_queue:\n pos = cell_queue.popleft()\n i, j = pos\n for dir in [[-1, 0], [1, 0], [0, -1], [0, 1]]:\n new_i = i + dir[0]\n new_j = j + dir[1]\n if -1 < new_i < m and -1 < new_j < n:\n if visited[new_i][new_j] == 0 and grid[new_i][new_j] == '1':\n visited[new_i][new_j] = 1\n cell_queue.append((new_i, new_j))\n\n m, n = len(grid), len(grid[0])\n visited = [[0] * n for _ in range(m)]\n nr_island = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '0':\n visited[i][j] = 1\n continue\n if visited[i][j] == 1:\n continue\n bfs(i, j, m, n, visited)\n nr_island += 1\n return nr_island\n\n\nsol = Solution()\ncases = [\n {\n \"input\": [\n [\"1\",\"1\",\"1\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"0\",\"0\"]\n ],\n \"expect\": 1\n },\n {\n \"input\": [\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"1\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"1\",\"1\"]\n ],\n \"expect\": 3\n },\n]\n\nfor case in cases:\n result = sol.numIslands(case[\"input\"])\n print(case[\"input\"], result)\n assert result == case['expect']\n", "repo_name": "tilaboy/work_note", "sub_path": "algorithms/leetcode_weekly/disjoint_union/200_Number_of_Islands.py", "file_name": "200_Number_of_Islands.py", "file_ext": "py", "file_size_in_byte": 4370, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 81, "usage_type": "call"}]} {"seq_id": "12876151330", "text": "import copy\nimport uuid\n\nimport pytest\nfrom httpx import AsyncClient\nfrom sqlalchemy import delete\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.orm import Session\n\nimport services\nfrom app.exceptions import CreatingError\nfrom app.models import stations\nfrom app.schemas import schemas_stations, schemas_washing\nfrom app.schemas import schemas_washing as washing\n# from app.utils.general import read_location\nfrom app.static.enums import RegionEnum, StationStatusEnum, RoleEnum, StationParamsEnum, \\\n\tStationsSortingEnum\nfrom tests.additional import auth, users as users_funcs\nfrom tests.additional.stations import get_station_by_id, generate_station, StationData, change_station_params, \\\n\trand_serial, delete_all_stations, generate_station_programs\nfrom tests.fills import stations as station_fills\n\n\n@pytest.mark.usefixtures(\"generate_users\", \"generate_default_station\")\nclass TestStations:\n\tinstaller: users_funcs.UserData\n\tmanager: 
users_funcs.UserData\n\tregion_manager: users_funcs.UserData\n\tsysadmin: users_funcs.UserData\n\tlaundry: users_funcs.UserData\n\tstation: StationData\n\n\tasync def test_create_station_with_default_params(self, ac: AsyncClient, session: AsyncSession,\n\t\t\t\t\t\t\t\t\t\t\t\t\t monkeypatch):\n\t\t\"\"\"\n\t\tСоздание станции без указания опциональных параметров.\n\t\t\"\"\"\n\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"Qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\t\tresponse = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\t# real_location = await read_location(\"Санкт-Петербург\")\n\t\tstation_response = schemas_stations.Station(\n\t\t\t**response.json()\n\t\t) # Validation error может подняться, если что-то не так\n\t\tstation_in_db = await get_station_by_id(station_response.id, session)\n\t\tassert station_response.dict() == station_in_db.dict()\n\t\tassert len(station_in_db.station_washing_agents) == services.DEFAULT_STATION_WASHING_AGENTS_AMOUNT\n\t\tassert len(station_in_db.station_washing_machines) == services.DEFAULT_STATION_WASHING_MACHINES_AMOUNT\n\t\tassert station_in_db.station_programs\n\t\t# assert station_in_db.location[\"latitude\"] == real_location.latitude and \\\n\t\t# \t station_in_db.location[\"longitude\"] == real_location.longitude\n\t\tassert station_in_db.is_active == services.DEFAULT_STATION_IS_ACTIVE\n\t\tassert station_in_db.is_protected == services.DEFAULT_STATION_IS_PROTECTED\n\t\tassert station_in_db.station_control.status == services.DEFAULT_STATION_STATUS\n\t\tassert not (station_in_db.is_active and not station_in_db.station_settings.teh_power), \\\n\t\t\t\"If station is active, teh must be powered on\"\n\t\tassert not (not station_in_db.is_active and (station_in_db.station_settings.station_power is True or\n\t\t\t\t\t\t\t\t\t\t\t\t\t station_in_db.station_control.status is not None\n\t\t\t\t\t\t\t\t\t\t\t\t\t or station_in_db.station_settings.teh_power is True)), \\\n\t\t\t\"If station isn't active, station power and TEH power must be False and station status must be null\"\n\t\tassert not (\n\t\t\tstation_in_db.station_settings.station_power is True and station_in_db.station_control.status is None), \\\n\t\t\t\"If station is powered on, station status must be not null\"\n\t\tassert not (station_in_db.station_control.status == StationStatusEnum.WORKING and not all(\n\t\t\tstation_in_db.station_control.washing_machine and any(\n\t\t\t\t(station_in_db.station_control.program_step, station_in_db.station_control.washing_agents)\n\t\t\t)\n\t\t)), \"If station status is working, washing machine must be defined, and one of params [program_step, washing_agents] \" \\\n\t\t\t\"must be not null\"\n\t\tassert station_in_db.name == station_data[\"station\"][\"name\"]\n\n\tasync def test_create_station_with_advanced_params(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\tСоздание станции с ручным вводом параметров.\n\t\t\"\"\"\n\t\tparams = station_fills.test_create_station_with_advanced_params\n\n\t\tresponse = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=params\n\t\t)\n\n\t\tstation_response = schemas_stations.Station(**response.json())\n\t\tstation_in_db = await get_station_by_id(station_response.id, session)\n\t\tparams = 
params[\"station\"]\n\t\tparams[\"region\"] = RegionEnum.NORTHWEST # or switch to the string region in the received objects\n\n\t\tassert station_response.dict() == station_in_db.dict()\n\t\tassert len(station_in_db.station_washing_agents) == len(params[\"washing_agents\"])\n\t\tassert len(station_in_db.station_washing_machines) == len(params[\"washing_machines\"])\n\t\tassert len(station_in_db.station_programs) == len(params[\"programs\"])\n\t\tfor k, v in params.items():\n\t\t\tif k in station_in_db.dict():\n\t\t\t\tassert station_in_db.dict()[k] == v\n\t\tfor k, v in params[\"settings\"].items():\n\t\t\tassert getattr(station_in_db.station_settings, k) == v\n\n\t\tfor program in station_in_db.station_programs:\n\t\t\tdefined_program = next(pg for pg in params[\"programs\"] if pg[\"program_step\"] == program.program_step)\n\t\t\tassert program.program_step == defined_program[\"program_step\"]\n\t\t\tassert program.name == defined_program[\"name\"]\n\t\t\tfor washing_agent in program.washing_agents:\n\t\t\t\tfor ag in defined_program[\"washing_agents\"]:\n\t\t\t\t\tif isinstance(ag, int) and ag == washing_agent.agent_number:\n\t\t\t\t\t\tdefined_washing_agent = next(agent for agent in params[\"washing_agents\"] if\n\t\t\t\t\t\t\t\t\t\t\t\t\t agent[\"agent_number\"] == ag)\n\t\t\t\t\t\tdefined_washing_agent = washing.WashingAgentCreateMixedInfo(**defined_washing_agent)\n\t\t\t\t\telif isinstance(ag, dict) and ag[\"agent_number\"] == washing_agent.agent_number:\n\t\t\t\t\t\tdefined_washing_agent = washing.WashingAgentWithoutRollback(**ag)\n\t\t\t\tassert washing_agent.volume == defined_washing_agent.volume\n\n\t\tdefault_washing_agents_params = {\n\t\t\t\"rollback\": services.DEFAULT_WASHING_AGENTS_ROLLBACK,\n\t\t\t\"volume\": services.DEFAULT_WASHING_AGENTS_VOLUME\n\t\t}\n\n\t\tfor washing_agent in station_in_db.station_washing_agents:\n\t\t\tdefined_washing_agent = next(ag for ag in params[\"washing_agents\"]\n\t\t\t\t\t\t\t\t\t\t if ag[\"agent_number\"] == washing_agent.agent_number)\n\t\t\tfor param in default_washing_agents_params:\n\t\t\t\tdefault_param = default_washing_agents_params.get(param)\n\t\t\t\tif param in defined_washing_agent:\n\t\t\t\t\tassert getattr(washing_agent, param) == defined_washing_agent[param]\n\t\t\t\telse:\n\t\t\t\t\tassert getattr(washing_agent, param) == default_param\n\n\t\tdefault_washing_machines_params = {\n\t\t\t\"track_length\": services.DEFAULT_WASHING_MACHINES_TRACK_LENGTH,\n\t\t\t\"is_active\": services.DEFAULT_WASHING_MACHINES_IS_ACTIVE,\n\t\t\t\"volume\": services.DEFAULT_WASHING_MACHINES_VOLUME\n\t\t}\n\t\tfor washing_machine in station_in_db.station_washing_machines:\n\t\t\tdefined_washing_machine = next(machine for machine in params[\"washing_machines\"]\n\t\t\t\t\t\t\t\t\t\t if machine[\"machine_number\"] == washing_machine.machine_number)\n\t\t\tfor param in default_washing_machines_params:\n\t\t\t\tdefault_param = default_washing_machines_params.get(param)\n\t\t\t\tif param in defined_washing_machine:\n\t\t\t\t\tassert getattr(washing_machine, param) == defined_washing_machine[param]\n\t\t\t\telse:\n\t\t\t\t\tassert getattr(washing_machine, param) == default_param\n\n\t\tparams[\"region\"] = \"Северо-западный\" # change back for the next tests
\n\n\tasync def test_create_station_with_default_programs(self, session: AsyncSession, ac: AsyncClient,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tmonkeypatch, sync_session: Session):\n\t\tprograms = generate_station_programs(amount=4, as_schema=True)\n\n\t\tasync def get_default_programs(*args, **kwargs):\n\t\t\treturn programs\n\t\tmonkeypatch.setattr(stations.StationProgram, \"get_default_programs\", get_default_programs)\n\n\t\tstation = await generate_station(ac, sync_session, self.sysadmin, use_default_programs=True)\n\n\t\tassert any(station.station_programs)\n\t\tassert len(station.station_programs) == len(programs)\n\t\tfor pg in programs:\n\t\t\tstation_pg = next(p for p in station.station_programs if p.program_step == pg.program_step)\n\t\t\tassert pg.program_number == station_pg.program_number\n\t\t\tassert pg.name == station_pg.name\n\t\t\tstation_pg_washing_agent_number = [ag.agent_number for ag in station_pg.washing_agents]\n\t\t\tfor ag in pg.washing_agents:\n\t\t\t\tag = schemas_washing.WashingAgentWithoutRollback(**ag) # the type is broken for some reason\n\t\t\t\tassert ag.agent_number in station_pg_washing_agent_number
\n\n\tasync def test_create_station_with_invalid_default_programs(self, session: AsyncSession, ac: AsyncClient,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t monkeypatch, sync_session: Session):\n\t\tasync def get_default_programs(*args, **kwargs):\n\t\t\traise CreatingError(\"Some getting default program exception\")\n\t\tmonkeypatch.setattr(stations.StationProgram, \"get_default_programs\", get_default_programs)\n\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"Qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\n\t\tr = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\tassert r.status_code == 422\n\n\tasync def test_create_station_with_getting_default_programs_connection_error(self, session: AsyncSession,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ac: AsyncClient,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t monkeypatch):\n\t\tasync def get_default_programs(*args, **kwargs):\n\t\t\traise ConnectionError(\"Some getting default program exception\")\n\t\tmonkeypatch.setattr(stations.StationProgram, \"get_default_programs\", get_default_programs)\n\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"Qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\n\t\tr = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\tassert r.status_code == 400
\n\n\tasync def test_create_station_with_comment(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\tNew parameter, so adding a separate test\n\t\t\"\"\"\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial(),\n\t\t\t\"comment\": \"it is test!\"\n\t\t})\n\t\tresponse = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\tassert response.status_code == 201\n\t\tr = schemas_stations.Station(**response.json())\n\t\tassert r.comment == station_data[\"station\"][\"comment\"]\n\n\t\t# ______________\n\n\t\t\"\"\"\n\t\tempty comment\n\t\t\"\"\"\n\t\tdel station_data[\"station\"][\"comment\"]\n\t\tstation_data[\"station\"][\"serial\"] = rand_serial()\n\t\tresponse = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\tassert response.status_code == 201\n\t\tassert response.json()[\"comment\"] is None
\n\n\tasync def test_create_station_not_released(self, session: AsyncSession, ac: AsyncClient):\n\t\t\"\"\"\n\t\tIf the station is not released, an empty creation date is set\n\t\t\"\"\"\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\t\turl = \"/v1/stations/?released=false\"\n\n\t\tr = await ac.post(\n\t\t\turl,\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\n\t\tassert r.status_code == 201\n\t\tr = schemas_stations.Station(**r.json())\n\t\tassert r.created_at is None\n\n\tasync def test_release_station(self, session: AsyncSession, ac: AsyncClient):\n\t\t\"\"\"\n\t\tIf the station is not released, it can be released.\n\t\t\"\"\"\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\t\turl = \"/v1/stations/?released=false\"\n\t\tr = await ac.post(\n\t\t\turl,\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\n\t\tstation = schemas_stations.Station(**r.json())\n\n\t\trelease_r = await ac.patch(\n\t\t\tf\"/v1/stations/release/{station.id}\",\n\t\t\theaders=self.sysadmin.headers\n\t\t)\n\t\tassert release_r.status_code == 200\n\t\tstation = schemas_stations.StationGeneralParams(**release_r.json())\n\t\tassert station.created_at
\n\n\tasync def test_not_released_station(self, session: AsyncSession, ac: AsyncClient):\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\t\turl = \"/v1/stations/?released=false\"\n\t\tr = await ac.post(\n\t\t\turl,\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\tstation = schemas_stations.Station(**r.json())\n\n\t\trequest_to_station_r = await ac.get(\n\t\t\tf\"/v1/manage/station/{station.id}/{StationParamsEnum.GENERAL}\",\n\t\t\theaders=self.sysadmin.headers\n\t\t)\n\t\tassert request_to_station_r.status_code == 403\n\n\t\t# ____\n\t\tstation_headers = {\"X-Station-Uuid\": str(station.id)}\n\n\t\trequest_from_station_r = await ac.get(\n\t\t\t\"/v1/stations/me\",\n\t\t\theaders=station_headers\n\t\t)\n\t\tassert request_from_station_r.status_code == 403\n\n\tasync def test_release_station_not_sysadmin_role(self, session: AsyncSession, ac: AsyncClient):\n\t\turl = f\"/v1/stations/release/{self.station.id}\"\n\t\tr = await ac.patch(\n\t\t\turl,\n\t\t\theaders=self.manager.headers\n\t\t)\n\t\tassert r.status_code == 403\n\n\tasync def test_release_station_not_existing_station_id(self, session: AsyncSession, ac: AsyncClient):\n\t\trand_uuid = uuid.uuid4()\n\t\turl = f\"/v1/stations/release/{rand_uuid}\"\n\t\tr = await ac.patch(\n\t\t\turl,\n\t\t\theaders=self.sysadmin.headers\n\t\t)\n\t\tassert r.status_code == 404\n\n\tasync def test_release_station_already_released(self, session: AsyncSession, ac: AsyncClient):\n\t\turl = f\"/v1/stations/release/{self.station.id}\"\n\t\tr = await ac.patch(\n\t\t\turl,\n\t\t\theaders=self.sysadmin.headers\n\t\t)\n\t\tassert r.status_code == 400
\n\n\tasync def test_create_station_errors(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\t- A non-existing washing agent cannot be passed to a program;\n\t\t- Invalid station parameters cannot be passed (properly checked in schemas_stations);\n\t\t- roles auto test;\n\t\t- users auth auto test.\n\t\t\"\"\"\n\t\tparams = copy.deepcopy(station_fills.test_create_station_with_advanced_params)\n\t\tif not isinstance(params[\"station\"][\"region\"], str):\n\t\t\tparams[\"station\"][\"region\"] = params[\"station\"][\n\t\t\t\t\"region\"].value # does not manage to change back to a string in time (\n\t\t# _______________________________________________________________\n\t\tparams[\"station\"][\"programs\"].append(\n\t\t\t{\n\t\t\t\t\"program_step\": 13,\n\t\t\t\t\"washing_agents\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"agent_number\": 5 # there is no such agent in the list\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}\n\t\t)\n\n\t\tnon_existing_washing_agent_r = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=params\n\t\t)\n\n\t\tassert non_existing_washing_agent_r.status_code == 422\n\n\t\tparams[\"station\"][\"programs\"].remove(params[\"station\"][\"programs\"][-1])\n\n\t\t# ________________________________________________________\n\n\t\tparams[\"station\"][\"settings\"][\"station_power\"] = True\n\t\tparams[\"station\"][\"is_active\"] = False\n\n\t\tinvalid_params_r = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=params\n\t\t)\n\n\t\tassert invalid_params_r.status_code == 422\n\n\t\t# ________________________________________________________\n\n\t\tawait auth.url_auth_roles_test(\n\t\t\t\"/v1/stations/\", \"post\",\n\t\t\tRoleEnum.SYSADMIN, self.sysadmin,\n\t\t\tsession, ac, json=station_fills.test_create_station_with_advanced_params\n\t\t)\n\t\tawait auth.url_auth_test(\n\t\t\t\"/v1/stations/\", \"post\", self.sysadmin, ac, session,\n\t\t\tjson=station_fills.test_create_station_with_advanced_params\n\t\t)
\n\n\tasync def test_read_all_stations(self, ac: AsyncClient, session: AsyncSession,\n\t\t\t\t\t\t\t\t\t sync_session: Session):\n\t\t\"\"\"\n\t\tReading the list of stations.\n\t\t\"\"\"\n\t\tawait delete_all_stations(session) # cannot trace where the error comes from - somewhere\n\t\t# a washing machine got deleted from station control while in working state\n\t\tstations_ = await StationData.generate_stations_list(ac, sync_session, self.sysadmin, session,\n\t\t\t\t\t\t\t\t\t\t\t\t amount=10)\n\t\t# ____\n\t\t# sys, manager role\n\t\tfor user in (self.sysadmin, self.manager):\n\t\t\tr = await ac.get(\n\t\t\t\t\"/v1/stations/\",\n\t\t\t\theaders=user.headers\n\t\t\t)\n\t\t\tassert r.status_code == 200\n\t\t\tr = [schemas_stations.StationInList(**s) for s in r.json()]\n\t\t\tassert len(r) >= len(stations_)\n\n\t\t# ____\n\t\t# region manager & installer role\n\t\tfor user in (self.installer, self.region_manager):\n\t\t\tr = await ac.get(\n\t\t\t\t\"/v1/stations/\",\n\t\t\t\theaders=user.headers\n\t\t\t)\n\t\t\tassert r.status_code == 200\n\t\t\tr = [schemas_stations.StationInList(**s) for s in r.json()]\n\t\t\tassert len(r)\n\t\t\tassert all(\n\t\t\t\t(st.general.region == user.region for st in r)\n\t\t\t)\n\n\tasync def test_read_all_stations_(self, session: AsyncSession, ac: AsyncClient,\n\t\t\t\t\t\t\t\t\t sync_session: Session):\n\t\t\"\"\"\n\t\tverify that the parameters are definitely returned\n\t\t\"\"\"\n\t\tawait self.station.generate_data_for_read_stations_list(\n\t\t\tsession, ac, sync_session, ctrl=True, owner=True, logs=True\n\t\t)\n\t\tr = await ac.get(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers\n\t\t)\n\t\tr = [schemas_stations.StationInList(**s) for s in r.json()]\n\t\tstation = next(s for s in r if str(s.general.id) == str(self.station.id))\n\t\tassert station.last_work_at\n\t\tassert station.last_maintenance_at\n\t\tassert station.owner\n\t\tassert station.control.status
\n\n\tasync def test_read_all_stations_with_ordering(self, ac: AsyncClient, session: AsyncSession,\n\t\t\t\t\t\t\t\t\t\t\t\t sync_session: Session):\n\t\theaders = self.sysadmin.headers\n\t\tawait StationData.generate_stations_list(ac, sync_session, self.sysadmin, session,\n\t\t\t\t\t\t\t\t\t\t\t\t amount=10)\n\t\tsorting_keys = {StationsSortingEnum.OWNER: lambda st_: st_.owner.last_name,\n\t\t\t\t\t\tStationsSortingEnum.STATUS: lambda st_: st_.control.status.value,\n\t\t\t\t\t\tStationsSortingEnum.MAINTENANCE: lambda st_: st_.last_maintenance_at,\n\t\t\t\t\t\tStationsSortingEnum.LAST_WORK: lambda st_: st_.last_work_at,\n\t\t\t\t\t\tStationsSortingEnum.NAME: lambda st_: st_.general.name,\n\t\t\t\t\t\tStationsSortingEnum.REGION: lambda st_: st_.general.region.value}\n\t\tfor order in list(StationsSortingEnum):\n\t\t\tfor desc in (True, False):\n\t\t\t\turl = f\"/v1/stations/?order_by={order.value}\"\n\t\t\t\tsorting_params = {\"key\": sorting_keys[order]}\n\t\t\t\tif desc:\n\t\t\t\t\turl += \"&desc=true\"\n\t\t\t\t\tsorting_params[\"reverse\"] = True\n\t\t\t\tr = await ac.get(url, headers=headers)\n\t\t\t\tassert r.status_code == 200\n\t\t\t\tr = [schemas_stations.StationInList(**s) for s in r.json()]\n\t\t\t\tnullable_objs = []\n\t\t\t\tfor s in r:\n\t\t\t\t\tnullable_fields = {StationsSortingEnum.OWNER: s.owner,\n\t\t\t\t\t\t\t\t\t StationsSortingEnum.STATUS: s.control.status,\n\t\t\t\t\t\t\t\t\t StationsSortingEnum.MAINTENANCE: s.last_maintenance_at,\n\t\t\t\t\t\t\t\t\t StationsSortingEnum.LAST_WORK: s.last_work_at}\n\t\t\t\t\tif order in nullable_fields:\n\t\t\t\t\t\tnullable_field = nullable_fields[order]\n\t\t\t\t\t\tif nullable_field is None:\n\t\t\t\t\t\t\tnullable_objs.append(s)\n\t\t\t\tfor obj in nullable_objs:\n\t\t\t\t\tdel r[r.index(obj)]\n\t\t\t\tassert r == sorted(r, **sorting_params)\n\n\tasync def test_read_all_stations_by_not_permitted_user(self, ac: AsyncClient, session: AsyncSession):\n\t\tr = await ac.get(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.laundry.headers\n\t\t)\n\t\tassert r.status_code == 403\n\n\tasync def test_read_all_stations_not_authenticated(self, ac: AsyncClient,\n\t\t\t\t\t\t\t\t\t\t\t\t\t session: AsyncSession):\n\t\tawait auth.url_auth_test(\n\t\t\t\"/v1/stations/\", \"get\", self.sysadmin,\n\t\t\tac, session\n\t\t)
\n\n\tasync def test_read_stations_params(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\tPartial reading of station data by the station.\n\t\t\"\"\"\n\t\tgeneral_params_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.GENERAL.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\t\tassert general_params_r.status_code == 200\n\t\tresult = schemas_stations.StationGeneralParamsForStation(**general_params_r.json())\n\t\tfor k, v in self.station.__dict__.items():\n\t\t\tif k in result.dict():\n\t\t\t\tassert getattr(result, k) == v\n\n\t\t# _____________________________________________________\n\n\t\tsettings_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.SETTINGS.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\n\t\tassert settings_r.status_code == 200\n\t\tsettings_result = schemas_stations.StationSettings(**settings_r.json())\n\n\t\t# _____________________________________________________\n\n\t\tcontrol_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.CONTROL.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\t\tassert control_r.status_code == 200\n\t\tresult = schemas_stations.StationControl(**control_r.json())\n\n\t\tassert settings_result.station_power is True and result.status == StationStatusEnum.AWAITING or \\\n\t\t\t settings_result.station_power is False and result.status is None\n\n\t\t# _____________________________________________________\n\n\t\twashing_agents_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.WASHING_AGENTS.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\t\tassert washing_agents_r.status_code == 200\n\t\twashing_agents_result = washing_agents_r.json()\n\n\t\tfor washing_agent in washing_agents_result:\n\t\t\twashing_agent = washing.WashingAgent(**washing_agent) # Validation error\n\t\t\tassert services.MIN_WASHING_AGENTS_VOLUME <= washing_agent.volume <= services.MAX_WASHING_AGENTS_VOLUME\n\t\t\tassert washing_agent.rollback is services.DEFAULT_WASHING_AGENTS_ROLLBACK\n\t\t# _____________________________________________________\n\n\t\tprograms_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.PROGRAMS.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\t\tassert programs_r.status_code == 200\n\n\t\tresult = programs_r.json()\n\t\tfor program in result:\n\t\t\tprogram = schemas_stations.StationProgram(**program)\n\t\t\tassert program.program_number == program.program_step // 10\n\t\t\tfor washing_agent in program.washing_agents:\n\t\t\t\tassert washing_agent.agent_number in [ag[\"agent_number\"] for ag in washing_agents_result]\n\t\t\t\tassert services.MIN_STATION_WASHING_AGENTS_AMOUNT <= washing_agent.agent_number <= \\\n\t\t\t\t\t services.MAX_STATION_WASHING_AGENTS_AMOUNT\n\n\t\t# ____________________________________________________\n\n\t\twashing_machines_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.WASHING_MACHINES.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\n\t\tassert washing_machines_r.status_code == 200\n\t\tresult = washing_machines_r.json()\n\n\t\tfor machine in result:\n\t\t\tmachine = washing.WashingMachine(**machine)\n\t\t\tassert services.MIN_WASHING_MACHINE_VOLUME <= machine.volume <= services.MAX_WASHING_MACHINE_VOLUME\n\t\t\tassert services.MIN_STATION_WASHING_MACHINES_AMOUNT <= machine.machine_number \\\n\t\t\t\t <= services.MAX_STATION_WASHING_MACHINES_AMOUNT\n\t\t\tassert services.MIN_WASHING_MACHINE_TRACK_LENGTH <= machine.track_length <= \\\n\t\t\t\t services.MAX_WASHING_MACHINE_TRACK_LENGTH\n\t\t\tassert machine.is_active == services.DEFAULT_WASHING_MACHINES_IS_ACTIVE
\n\n\tasync def test_read_stations_params_errors(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\t- Missing station data;\n\t\t- stations auth auto test\n\t\t\"\"\"\n\t\tawait session.execute(\n\t\t\tdelete(stations.StationControl).where(stations.StationControl.station_id == self.station.id)\n\t\t)\n\t\tawait session.commit()\n\n\t\tnon_existing_data_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.CONTROL.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\n\t\tassert non_existing_data_r.status_code == 404\n\n\t\tstation = await generate_station(ac, user=self.sysadmin)\n\n\t\tawait auth.url_auth_stations_test(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.GENERAL.value,\n\t\t\t\"get\", station, session, ac\n\t\t)\n\n\tasync def test_read_stations_me(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\tReading of all station data by the station.\n\t\t\"\"\"\n\t\tresponse = await ac.get(\n\t\t\t\"/v1/stations/me\",\n\t\t\theaders=self.station.headers\n\t\t)\n\t\tassert response.status_code == 200\n\t\tschemas_stations.StationForStation(**response.json()) # Validation error\n\n\tasync def test_read_station_me_errors(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\t- Missing station data;\n\t\t- stations auth auto test\n\t\t\"\"\"\n\t\tawait auth.url_auth_stations_test(\n\t\t\t\"/v1/stations/me\", \"get\", self.station, session, ac\n\t\t)\n\t\tawait change_station_params(self.station, session, status=StationStatusEnum.AWAITING)\n\n\t\tawait session.execute(\n\t\t\tdelete(stations.StationSettings).where(stations.StationSettings.station_id == self.station.id)\n\t\t)\n\t\tawait session.commit()\n\n\t\tnon_existing_data_r = await ac.get(\n\t\t\t\"/v1/stations/me\",\n\t\t\theaders=self.station.headers\n\t\t)\n\n\t\tassert non_existing_data_r.status_code == 404\n\n", "repo_name": "Dahaka1/lfs", "sub_path": "tests/test_stations.py", "file_name": "test_stations.py", "file_ext": "py", "file_size_in_byte": 25048, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "tests.additional.users.UserData", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tests.additional.users", "line_number": 26, "usage_type": "name"}, {"api_name": "tests.additional.users.UserData", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tests.additional.users", "line_number": 27, "usage_type": "name"}, {"api_name": "tests.additional.users.UserData", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tests.additional.users", "line_number": 28, "usage_type": "name"}, {"api_name": "tests.additional.users.UserData", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tests.additional.users", "line_number": 29, "usage_type": "name"}, {"api_name": "tests.additional.users.UserData", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tests.additional.users", "line_number": 30, "usage_type": "name"}, {"api_name": "tests.additional.stations.StationData", "line_number": 31, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 33, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 33, "usage_type": "name"}, {"api_name": "app.static.enums.RegionEnum.NORTHWEST", "line_number": 44, "usage_type": "attribute"}, {"api_name": "app.static.enums.RegionEnum", "line_number": 44, "usage_type": "name"}, {"api_name": "tests.additional.stations.rand_serial", "line_number": 45, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations.Station", "line_number": 53, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 53, "usage_type": "name"}, {"api_name": "tests.additional.stations.get_station_by_id", "line_number": 56, "usage_type": "call"}, {"api_name": "services.DEFAULT_STATION_WASHING_AGENTS_AMOUNT", "line_number": 58, "usage_type": "attribute"}, {"api_name": "services.DEFAULT_STATION_WASHING_MACHINES_AMOUNT", "line_number": 59, "usage_type": "attribute"}, {"api_name": "services.DEFAULT_STATION_IS_ACTIVE", "line_number": 63, "usage_type": "attribute"}, {"api_name":
"services.DEFAULT_STATION_IS_PROTECTED", "line_number": 64, "usage_type": "attribute"}, {"api_name": "services.DEFAULT_STATION_STATUS", "line_number": 65, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationStatusEnum.WORKING", "line_number": 75, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationStatusEnum", "line_number": 75, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 83, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 83, "usage_type": "name"}, {"api_name": "tests.fills.stations.test_create_station_with_advanced_params", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tests.fills.stations", "line_number": 87, "usage_type": "name"}, {"api_name": "app.schemas.schemas_stations.Station", "line_number": 95, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 95, "usage_type": "name"}, {"api_name": "tests.additional.stations.get_station_by_id", "line_number": 96, "usage_type": "call"}, {"api_name": "app.static.enums.RegionEnum.NORTHWEST", "line_number": 98, "usage_type": "attribute"}, {"api_name": "app.static.enums.RegionEnum", "line_number": 98, "usage_type": "name"}, {"api_name": "app.schemas.schemas_washing.WashingAgentCreateMixedInfo", "line_number": 119, "usage_type": "call"}, {"api_name": "app.schemas.schemas_washing", "line_number": 119, "usage_type": "name"}, {"api_name": "app.schemas.schemas_washing.WashingAgentWithoutRollback", "line_number": 121, "usage_type": "call"}, {"api_name": "app.schemas.schemas_washing", "line_number": 121, "usage_type": "name"}, {"api_name": "services.DEFAULT_WASHING_AGENTS_ROLLBACK", "line_number": 125, "usage_type": "attribute"}, {"api_name": "services.DEFAULT_WASHING_AGENTS_VOLUME", "line_number": 126, "usage_type": "attribute"}, {"api_name": "services.DEFAULT_WASHING_MACHINES_TRACK_LENGTH", "line_number": 140, "usage_type": "attribute"}, {"api_name": "services.DEFAULT_WASHING_MACHINES_IS_ACTIVE", "line_number": 141, "usage_type": "attribute"}, {"api_name": "services.DEFAULT_WASHING_MACHINES_VOLUME", "line_number": 142, "usage_type": "attribute"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 156, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 156, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 157, "usage_type": "name"}, {"api_name": "tests.additional.stations.generate_station_programs", "line_number": 158, "usage_type": "call"}, {"api_name": "app.models.stations.StationProgram", "line_number": 162, "usage_type": "attribute"}, {"api_name": "app.models.stations", "line_number": 162, "usage_type": "name"}, {"api_name": "tests.additional.stations.generate_station", "line_number": 164, "usage_type": "call"}, {"api_name": "app.schemas.schemas_washing.WashingAgentWithoutRollback", "line_number": 174, "usage_type": "call"}, {"api_name": "app.schemas.schemas_washing", "line_number": 174, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 177, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 177, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 178, "usage_type": "name"}, {"api_name": "app.exceptions.CreatingError", "line_number": 180, "usage_type": "call"}, {"api_name": "app.models.stations.StationProgram", "line_number": 181, "usage_type": "attribute"}, {"api_name": "app.models.stations", "line_number": 181, "usage_type": "name"}, {"api_name": 
"app.static.enums.RegionEnum.NORTHWEST", "line_number": 188, "usage_type": "attribute"}, {"api_name": "app.static.enums.RegionEnum", "line_number": 188, "usage_type": "name"}, {"api_name": "tests.additional.stations.rand_serial", "line_number": 189, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 199, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 200, "usage_type": "name"}, {"api_name": "app.models.stations.StationProgram", "line_number": 204, "usage_type": "attribute"}, {"api_name": "app.models.stations", "line_number": 204, "usage_type": "name"}, {"api_name": "app.static.enums.RegionEnum.NORTHWEST", "line_number": 211, "usage_type": "attribute"}, {"api_name": "app.static.enums.RegionEnum", "line_number": 211, "usage_type": "name"}, {"api_name": "tests.additional.stations.rand_serial", "line_number": 212, "usage_type": "call"}, {"api_name": "httpx.AsyncClient", "line_number": 222, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 222, "usage_type": "name"}, {"api_name": "app.static.enums.RegionEnum.NORTHWEST", "line_number": 231, "usage_type": "attribute"}, {"api_name": "app.static.enums.RegionEnum", "line_number": 231, "usage_type": "name"}, {"api_name": "tests.additional.stations.rand_serial", "line_number": 232, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations.Station", "line_number": 241, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 241, "usage_type": "name"}, {"api_name": "tests.additional.stations.rand_serial", "line_number": 250, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 259, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 259, "usage_type": "name"}, {"api_name": "app.static.enums.RegionEnum.NORTHWEST", "line_number": 268, "usage_type": "attribute"}, {"api_name": "app.static.enums.RegionEnum", "line_number": 268, "usage_type": "name"}, {"api_name": "tests.additional.stations.rand_serial", "line_number": 269, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations.Station", "line_number": 280, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 280, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 283, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 283, "usage_type": "name"}, {"api_name": "app.static.enums.RegionEnum.NORTHWEST", "line_number": 292, "usage_type": "attribute"}, {"api_name": "app.static.enums.RegionEnum", "line_number": 292, "usage_type": "name"}, {"api_name": "tests.additional.stations.rand_serial", "line_number": 293, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations.Station", "line_number": 302, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 302, "usage_type": "name"}, {"api_name": "app.schemas.schemas_stations.StationGeneralParams", "line_number": 309, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 309, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 312, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 312, "usage_type": "name"}, {"api_name": "app.static.enums.RegionEnum.NORTHWEST", "line_number": 318, "usage_type": "attribute"}, {"api_name": "app.static.enums.RegionEnum", "line_number": 318, "usage_type": "name"}, {"api_name": "tests.additional.stations.rand_serial", 
"line_number": 319, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations.Station", "line_number": 327, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 327, "usage_type": "name"}, {"api_name": "app.static.enums.StationParamsEnum.GENERAL", "line_number": 330, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationParamsEnum", "line_number": 330, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 344, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 344, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 352, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 352, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 353, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 361, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 361, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 369, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 369, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 376, "usage_type": "call"}, {"api_name": "tests.fills.stations.test_create_station_with_advanced_params", "line_number": 376, "usage_type": "attribute"}, {"api_name": "tests.fills.stations", "line_number": 376, "usage_type": "name"}, {"api_name": "tests.additional.auth.url_auth_roles_test", "line_number": 417, "usage_type": "call"}, {"api_name": "tests.additional.auth", "line_number": 417, "usage_type": "name"}, {"api_name": "app.static.enums.RoleEnum.SYSADMIN", "line_number": 419, "usage_type": "attribute"}, {"api_name": "app.static.enums.RoleEnum", "line_number": 419, "usage_type": "name"}, {"api_name": "tests.fills.stations.test_create_station_with_advanced_params", "line_number": 420, "usage_type": "attribute"}, {"api_name": "tests.fills.stations", "line_number": 420, "usage_type": "name"}, {"api_name": "tests.additional.auth.url_auth_test", "line_number": 422, "usage_type": "call"}, {"api_name": "tests.additional.auth", "line_number": 422, "usage_type": "name"}, {"api_name": "tests.fills.stations.test_create_station_with_advanced_params", "line_number": 424, "usage_type": "attribute"}, {"api_name": "tests.fills.stations", "line_number": 424, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 427, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 427, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 428, "usage_type": "name"}, {"api_name": "tests.additional.stations.delete_all_stations", "line_number": 432, "usage_type": "call"}, {"api_name": "tests.additional.stations.StationData.generate_stations_list", "line_number": 434, "usage_type": "call"}, {"api_name": "tests.additional.stations.StationData", "line_number": 434, "usage_type": "name"}, {"api_name": "app.schemas.schemas_stations.StationInList", "line_number": 444, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 444, "usage_type": "name"}, {"api_name": "app.schemas.schemas_stations.StationInList", "line_number": 455, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 455, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 461, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 461, "usage_type": "name"}, 
{"api_name": "sqlalchemy.orm.Session", "line_number": 462, "usage_type": "name"}, {"api_name": "app.schemas.schemas_stations.StationInList", "line_number": 473, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 473, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 480, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 480, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 481, "usage_type": "name"}, {"api_name": "tests.additional.stations.StationData.generate_stations_list", "line_number": 483, "usage_type": "call"}, {"api_name": "tests.additional.stations.StationData", "line_number": 483, "usage_type": "name"}, {"api_name": "app.static.enums.StationsSortingEnum.OWNER", "line_number": 485, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationsSortingEnum", "line_number": 485, "usage_type": "name"}, {"api_name": "app.static.enums.StationsSortingEnum.STATUS", "line_number": 486, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationsSortingEnum", "line_number": 486, "usage_type": "name"}, {"api_name": "app.static.enums.StationsSortingEnum.MAINTENANCE", "line_number": 487, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationsSortingEnum", "line_number": 487, "usage_type": "name"}, {"api_name": "app.static.enums.StationsSortingEnum.LAST_WORK", "line_number": 488, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationsSortingEnum", "line_number": 488, "usage_type": "name"}, {"api_name": "app.static.enums.StationsSortingEnum.NAME", "line_number": 489, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationsSortingEnum", "line_number": 489, "usage_type": "name"}, {"api_name": "app.static.enums.StationsSortingEnum.REGION", "line_number": 490, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationsSortingEnum", "line_number": 490, "usage_type": "name"}, {"api_name": "app.static.enums.StationsSortingEnum", "line_number": 491, "usage_type": "argument"}, {"api_name": "app.schemas.schemas_stations.StationInList", "line_number": 500, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 500, "usage_type": "name"}, {"api_name": "app.static.enums.StationsSortingEnum.OWNER", "line_number": 503, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationsSortingEnum", "line_number": 503, "usage_type": "name"}, {"api_name": "app.static.enums.StationsSortingEnum.STATUS", "line_number": 504, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationsSortingEnum", "line_number": 504, "usage_type": "name"}, {"api_name": "app.static.enums.StationsSortingEnum.MAINTENANCE", "line_number": 505, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationsSortingEnum", "line_number": 505, "usage_type": "name"}, {"api_name": "app.static.enums.StationsSortingEnum.LAST_WORK", "line_number": 506, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationsSortingEnum", "line_number": 506, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 515, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 515, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 522, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 523, "usage_type": "name"}, {"api_name": "tests.additional.auth.url_auth_test", "line_number": 524, "usage_type": "call"}, {"api_name": 
"tests.additional.auth", "line_number": 524, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 529, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 529, "usage_type": "name"}, {"api_name": "app.static.enums.StationParamsEnum.GENERAL", "line_number": 534, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationParamsEnum", "line_number": 534, "usage_type": "name"}, {"api_name": "app.schemas.schemas_stations.StationGeneralParamsForStation", "line_number": 538, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 538, "usage_type": "name"}, {"api_name": "app.static.enums.StationParamsEnum.SETTINGS", "line_number": 546, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationParamsEnum", "line_number": 546, "usage_type": "name"}, {"api_name": "app.schemas.schemas_stations.StationSettings", "line_number": 551, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 551, "usage_type": "name"}, {"api_name": "app.static.enums.StationParamsEnum.CONTROL", "line_number": 556, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationParamsEnum", "line_number": 556, "usage_type": "name"}, {"api_name": "app.schemas.schemas_stations.StationControl", "line_number": 560, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 560, "usage_type": "name"}, {"api_name": "app.static.enums.StationStatusEnum.AWAITING", "line_number": 562, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationStatusEnum", "line_number": 562, "usage_type": "name"}, {"api_name": "app.static.enums.StationParamsEnum.WASHING_AGENTS", "line_number": 568, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationParamsEnum", "line_number": 568, "usage_type": "name"}, {"api_name": "app.schemas.schemas_washing.WashingAgent", "line_number": 575, "usage_type": "call"}, {"api_name": "app.schemas.schemas_washing", "line_number": 575, "usage_type": "name"}, {"api_name": "services.MIN_WASHING_AGENTS_VOLUME", "line_number": 576, "usage_type": "attribute"}, {"api_name": "services.MAX_WASHING_AGENTS_VOLUME", "line_number": 576, "usage_type": "attribute"}, {"api_name": "services.DEFAULT_WASHING_AGENTS_ROLLBACK", "line_number": 577, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationParamsEnum.PROGRAMS", "line_number": 581, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationParamsEnum", "line_number": 581, "usage_type": "name"}, {"api_name": "app.schemas.schemas_stations.StationProgram", "line_number": 588, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 588, "usage_type": "name"}, {"api_name": "services.MIN_STATION_WASHING_AGENTS_AMOUNT", "line_number": 592, "usage_type": "attribute"}, {"api_name": "services.MAX_STATION_WASHING_AGENTS_AMOUNT", "line_number": 593, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationParamsEnum.WASHING_MACHINES", "line_number": 598, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationParamsEnum", "line_number": 598, "usage_type": "name"}, {"api_name": "app.schemas.schemas_washing.WashingMachine", "line_number": 606, "usage_type": "call"}, {"api_name": "app.schemas.schemas_washing", "line_number": 606, "usage_type": "name"}, {"api_name": "services.MIN_WASHING_MACHINE_VOLUME", "line_number": 607, "usage_type": "attribute"}, {"api_name": "services.MAX_WASHING_MACHINE_VOLUME", "line_number": 607, "usage_type": "attribute"}, {"api_name": 
"services.MIN_STATION_WASHING_MACHINES_AMOUNT", "line_number": 608, "usage_type": "attribute"}, {"api_name": "services.MAX_STATION_WASHING_MACHINES_AMOUNT", "line_number": 609, "usage_type": "attribute"}, {"api_name": "services.MIN_WASHING_MACHINE_TRACK_LENGTH", "line_number": 610, "usage_type": "attribute"}, {"api_name": "services.MAX_WASHING_MACHINE_TRACK_LENGTH", "line_number": 611, "usage_type": "attribute"}, {"api_name": "services.DEFAULT_WASHING_MACHINES_IS_ACTIVE", "line_number": 612, "usage_type": "attribute"}, {"api_name": "httpx.AsyncClient", "line_number": 614, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 614, "usage_type": "name"}, {"api_name": "sqlalchemy.delete", "line_number": 620, "usage_type": "call"}, {"api_name": "app.models.stations.StationControl", "line_number": 620, "usage_type": "attribute"}, {"api_name": "app.models.stations", "line_number": 620, "usage_type": "name"}, {"api_name": "app.static.enums.StationParamsEnum.CONTROL", "line_number": 625, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationParamsEnum", "line_number": 625, "usage_type": "name"}, {"api_name": "tests.additional.stations.generate_station", "line_number": 631, "usage_type": "call"}, {"api_name": "tests.additional.auth.url_auth_stations_test", "line_number": 633, "usage_type": "call"}, {"api_name": "tests.additional.auth", "line_number": 633, "usage_type": "name"}, {"api_name": "app.static.enums.StationParamsEnum.GENERAL", "line_number": 634, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationParamsEnum", "line_number": 634, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 638, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 638, "usage_type": "name"}, {"api_name": "app.schemas.schemas_stations.StationForStation", "line_number": 647, "usage_type": "call"}, {"api_name": "app.schemas.schemas_stations", "line_number": 647, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 649, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 649, "usage_type": "name"}, {"api_name": "tests.additional.auth.url_auth_stations_test", "line_number": 654, "usage_type": "call"}, {"api_name": "tests.additional.auth", "line_number": 654, "usage_type": "name"}, {"api_name": "tests.additional.stations.change_station_params", "line_number": 657, "usage_type": "call"}, {"api_name": "app.static.enums.StationStatusEnum.AWAITING", "line_number": 657, "usage_type": "attribute"}, {"api_name": "app.static.enums.StationStatusEnum", "line_number": 657, "usage_type": "name"}, {"api_name": "sqlalchemy.delete", "line_number": 660, "usage_type": "call"}, {"api_name": "app.models.stations.StationSettings", "line_number": 660, "usage_type": "attribute"}, {"api_name": "app.models.stations", "line_number": 660, "usage_type": "name"}, {"api_name": "pytest.mark.usefixtures", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 24, "usage_type": "attribute"}]} {"seq_id": "39316182852", "text": "'''\nMisfit of the Advection-Diffusion problem written in FEniCS-2019.1.0 and hIPPYlib-3.0\nhttps://hippylib.github.io/tutorials_v3.0.0/4_AdvectionDiffusionBayesian/\n-------------------------------------------------------------------------\nProject of Bayesian SpatioTemporal analysis for Inverse Problems (B-STIP)\nShiwei Lan @ ASU, Sept. 
2020\n--------------------------------------------------------------------------\nCreated on Sep 23, 2020\n'''\n__author__ = \"Shiwei Lan\"\n__copyright__ = \"Copyright 2020, The Bayesian STIP project\"\n__license__ = \"GPL\"\n__version__ = \"0.1\"\n__maintainer__ = \"Shiwei Lan\"\n__email__ = \"slan@asu.edu; lanzithinking@outlook.com\"\n\nimport dolfin as dl\nimport ufl\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport sys\nimport os\nsys.path.append( os.environ.get('HIPPYLIB_BASE_DIR', \"../../\") )\nfrom hippylib import *\nfrom pde import TimeDependentAD\n\nsys.path.append( \"../\" )\nfrom util.common_colorbar import common_colorbar\n\nclass SpaceTimePointwiseStateObservation(Misfit):\n    \"\"\"\n    Misfit (negative loglikelihood) of Advection-Diffusion inverse problem\n    \"\"\"\n    def __init__(self, Vh, observation_times=None, targets=None, rel_noise = 0.01, d = None, **kwargs):\n        \"\"\"\n        Initialize the misfit\n        \"\"\"\n        # function space\n        self.Vh = Vh\n        self.mpi_comm = self.Vh.mesh().mpi_comm()\n        self.rank = dl.MPI.rank(self.mpi_comm)\n        # observation times\n        if observation_times is None:\n            t_init = 0.\n            t_final = 4.\n            t_1 = 1.\n            dt = .1\n            observation_dt = .2\n            self.observation_times = np.arange(t_1, t_final+.5*dt, observation_dt)\n        else:\n            self.observation_times = observation_times\n        # observation locations\n        self.targets = np.loadtxt('targets.txt') if targets is None else targets\n        self.rel_noise = rel_noise\n        # obtain observations\n        if d is None:\n            d=self.get_observations(pde=kwargs.pop('pde',None), nref=kwargs.pop('nref',0), init=kwargs.pop('init',None))\n            if self.rank == 0:\n                sep = \"\\n\"+\"#\"*80+\"\\n\"\n                print( sep, \"Generate synthetic observations at {0} locations for {1} time points\".format(self.targets.shape[0], len(self.observation_times)), sep )\n        # reset observation container for reference\n        self.prep_container()\n        self.d.axpy(1., d)
\n    \n    def prep_container(self, Vh=None):\n        \"\"\"\n        Prepare storage of the observations\n        \"\"\"\n        if Vh is None:\n            Vh = self.Vh\n        # storage for observations\n        self.B = assemblePointwiseObservation(Vh, self.targets)\n        self.d = TimeDependentVector(self.observation_times)\n        self.d.initialize(self.B, 0)\n        ## TEMP Vars\n        self.u_snapshot = dl.Vector()\n        self.Bu_snapshot = dl.Vector()\n        self.d_snapshot = dl.Vector()\n        self.B.init_vector(self.u_snapshot, 1)\n        self.B.init_vector(self.Bu_snapshot, 0)\n        self.B.init_vector(self.d_snapshot, 0)\n    \n    def get_observations(self, pde=None, nref=0, init=None):\n        \"\"\"\n        Get the observations at given locations and time points\n        \"\"\"\n        # pde for observations\n        if pde is None:\n            mesh = self.Vh.mesh()\n            for i in range(nref): mesh = dl.refine(mesh) # refine mesh to obtain observations\n            pde = TimeDependentAD(mesh)\n        elif nref>0:\n            mesh = pde.mesh\n            for i in range(nref): mesh = dl.refine(mesh) # refine mesh to obtain observations\n            pde = TimeDependentAD(mesh)\n        # initial condition\n        if init is None:\n            true_init = dl.Expression('min(0.5,exp(-100*(pow(x[0]-0.35,2) + pow(x[1]-0.7,2))))', element=pde.Vh[STATE].ufl_element())\n            init = dl.interpolate(true_init, pde.Vh[STATE]).vector()\n        # prepare container for observations\n        self.prep_container(pde.Vh[STATE])\n        \n        utrue = pde.generate_vector(STATE)\n        x = [utrue, init, None]\n        pde.solveFwd(x[STATE], x)\n        self.observe(x, self.d)\n        MAX = self.d.norm(\"linf\", \"linf\")\n        noise_std_dev = self.rel_noise * MAX\n        parRandom.normal_perturb(noise_std_dev,self.d)\n        self.noise_variance = noise_std_dev*noise_std_dev\n        return self.d.copy()
\n    \n    def observe(self, x, obs):\n        \"\"\"\n        Observation operator\n        \"\"\"\n        obs.zero()\n        \n        for t in self.observation_times:\n            x[STATE].retrieve(self.u_snapshot, t)\n            self.B.mult(self.u_snapshot, self.Bu_snapshot)\n            obs.store(self.Bu_snapshot, t)\n    \n    def cost(self, x):\n        \"\"\"\n        Compute misfit\n        \"\"\"\n        c = 0\n        for t in self.observation_times:\n            x[STATE].retrieve(self.u_snapshot, t)\n            self.B.mult(self.u_snapshot, self.Bu_snapshot)\n            self.d.retrieve(self.d_snapshot, t)\n            self.Bu_snapshot.axpy(-1., self.d_snapshot)\n            c += self.Bu_snapshot.inner(self.Bu_snapshot)\n        \n        return c/(2.*self.noise_variance)\n    \n    def grad(self, i, x, out):\n        \"\"\"\n        Compute the gradient of misfit\n        \"\"\"\n        out.zero()\n        if i == STATE:\n            for t in self.observation_times:\n                x[STATE].retrieve(self.u_snapshot, t)\n                self.B.mult(self.u_snapshot, self.Bu_snapshot)\n                self.d.retrieve(self.d_snapshot, t)\n                self.Bu_snapshot.axpy(-1., self.d_snapshot)\n                self.Bu_snapshot *= 1./self.noise_variance\n                self.B.transpmult(self.Bu_snapshot, self.u_snapshot) \n                out.store(self.u_snapshot, t) \n        else:\n            pass\n    \n    def setLinearizationPoint(self, x, gauss_newton_approx=False):\n        pass\n    \n    def apply_ij(self, i,j, direction, out):\n        out.zero()\n        if i == STATE and j == STATE:\n            for t in self.observation_times:\n                direction.retrieve(self.u_snapshot, t)\n                self.B.mult(self.u_snapshot, self.Bu_snapshot)\n                self.Bu_snapshot *= 1./self.noise_variance\n                self.B.transpmult(self.Bu_snapshot, self.u_snapshot) \n                out.store(self.u_snapshot, t)\n        else:\n            pass \n    \n    def applyWuu(self, du, out):\n        out.zero()\n        self.apply_ij(STATE, STATE, du, out)\n    \n    def applyWum(self, dm, out):\n        out.zero()\n    \n    def applyWmu(self, du, out):\n        out.zero()\n    \n    def applyWmm(self, dm, out):\n        out.zero()
\n    \n    def plot_data(self, times, figsz=(12,5)):\n        \"\"\"\n        Plot the observations with their values u(x, t) at fixed locations for the given time points\n        \"\"\"\n        n=len(times)\n        nrow=np.floor(np.sqrt(n)).astype('int')\n        ncol=np.ceil(np.sqrt(n)).astype('int')\n        fig,axes=plt.subplots(nrows=nrow,ncols=ncol,sharex=True,sharey=True,figsize=figsz)\n        sub_figs = [None]*len(axes.flat)\n        for i in range(n):\n            plt.axes(axes.flat[i])\n            dl.plot(self.Vh.mesh())\n            sub_figs[i]=plt.scatter(self.targets[:,0],self.targets[:,1], c=self.d.data[np.where(np.isclose(self.d.times,times[i]))[0][0]], zorder=2)\n#             plt.xlim(0,1); plt.ylim(0,1)\n#             plt.gca().set_aspect('equal', 'box')\n            plt.title('Time: {:.1f} s'.format(times[i],))\n        fig=common_colorbar(fig,axes,sub_figs)\n        return fig\n    \nif __name__ == '__main__':\n    np.random.seed(2020)\n#     # define pde\n    meshsz = (61,61)\n    eldeg = 1\n    pde = TimeDependentAD(mesh=meshsz, eldeg=eldeg)\n    Vh = pde.Vh[STATE]\n    # obtain function space\n#     mesh = dl.Mesh('ad_10k.xml')\n#     Vh = dl.FunctionSpace(mesh, \"Lagrange\", 2)\n    # set observation times\n    t_init = 0.\n    t_final = 4.\n    t_1 = 1.\n    dt = .1\n    observation_dt = .2\n    observation_times = np.arange(t_1, t_final+.5*dt, observation_dt)\n    # set observation locations\n    targets = np.loadtxt('targets.txt')\n    # define misfit\n    rel_noise = .5\n    nref = 1\n    misfit = SpaceTimePointwiseStateObservation(Vh, observation_times, targets, rel_noise=rel_noise, nref=nref)\n#     # optional: refine mesh to obtain (new) observations\n#     rf_mesh = dl.refine(pde.mesh)\n#     rf_pde = TimeDependentAD(mesh=rf_mesh)\n#     rf_obs = SpaceTimePointwiseStateObservation(rf_pde.Vh[STATE], observation_times, targets, pde=rf_pde).d.copy()\n#     misfit.d.zero()\n#     misfit.d.axpy(1.,rf_obs)\n    # plot observations\n    plt_times=[1.,2.,3.,4.]\n    fig = misfit.plot_data(plt_times, (10,9))\n    plt.subplots_adjust(wspace=0.1,
hspace=0.2)\n plt.savefig(os.path.join(os.getcwd(),'properties/obs.png'),bbox_inches='tight')\n ", "repo_name": "lanzithinking/DREAM-BUQ", "sub_path": "ad_diff/misfit.py", "file_name": "misfit.py", "file_ext": "py", "file_size_in_byte": 8786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sys.path.append", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 24, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "dolfin.MPI.rank", "line_number": 42, "usage_type": "call"}, {"api_name": "dolfin.MPI", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 54, "usage_type": "call"}, {"api_name": "dolfin.Vector", "line_number": 77, "usage_type": "call"}, {"api_name": "dolfin.Vector", "line_number": 78, "usage_type": "call"}, {"api_name": "dolfin.Vector", "line_number": 79, "usage_type": "call"}, {"api_name": "dolfin.refine", "line_number": 91, "usage_type": "call"}, {"api_name": "pde.TimeDependentAD", "line_number": 92, "usage_type": "call"}, {"api_name": "pde.mesh", "line_number": 94, "usage_type": "attribute"}, {"api_name": "dolfin.refine", "line_number": 95, "usage_type": "call"}, {"api_name": "pde.TimeDependentAD", "line_number": 96, "usage_type": "call"}, {"api_name": "dolfin.Expression", "line_number": 99, "usage_type": "call"}, {"api_name": "pde.Vh", "line_number": 99, "usage_type": "attribute"}, {"api_name": "dolfin.interpolate", "line_number": 100, "usage_type": "call"}, {"api_name": "pde.Vh", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pde.Vh", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pde.generate_vector", "line_number": 104, "usage_type": "call"}, {"api_name": "pde.solveFwd", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "dolfin.plot", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "util.common_colorbar.common_colorbar", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 
204, "usage_type": "attribute"}, {"api_name": "pde.TimeDependentAD", "line_number": 208, "usage_type": "call"}, {"api_name": "pde.Vh", "line_number": 209, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path", "line_number": 236, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 236, "usage_type": "call"}]} {"seq_id": "12577010060", "text": "import win32com.client\nimport pandas as pd\nfrom datetime import datetime\nfrom com.utils import *\nimport time\n \n# CREON Plus common OBJECTs\ncpCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')\ncpStatus = win32com.client.Dispatch('CpUtil.CpCybos')\ncpOhlc = win32com.client.Dispatch('CpSysDib.StockChart')\n\ndef get_ohlc(code, qty):\n    \"\"\"Return qty rows of OHLC price data for the given stock code.\"\"\"\n    cpOhlc.SetInputValue(0, code) # stock code\n    cpOhlc.SetInputValue(1, ord('2')) # 1: by period, 2: by count\n    cpOhlc.SetInputValue(4, qty) # number of rows requested\n    cpOhlc.SetInputValue(5, [0, 2, 3, 4, 5, 8]) # 0: date, 2~5: OHLC\n    cpOhlc.SetInputValue(6, ord('D')) # D: daily\n    cpOhlc.SetInputValue(9, ord('1')) # 0: unadjusted price, 1: adjusted price\n    cpOhlc.BlockRequest()\n    count = cpOhlc.GetHeaderValue(3) # 3: number of rows received\n    columns = ['open', 'high', 'low', 'close', 'vol']\n    index = []\n    rows = []\n\n    for i in range(count): \n        index.append(cpOhlc.GetDataValue(0, i)) \n        rows.append([cpOhlc.GetDataValue(1, i), cpOhlc.GetDataValue(2, i),\n            cpOhlc.GetDataValue(3, i), cpOhlc.GetDataValue(4, i), cpOhlc.GetDataValue(5, i)]) \n    df = pd.DataFrame(rows, columns=columns, index=index) \n    return df\n\ndef getVolMax(volarr):\n    max = 0\n    for vol in volarr:\n        if max < vol:\n            max = vol\n    return max
\n\ndef get_movingaverage(code, window):\n    \"\"\"Return moving-average prices for the given stock code.\"\"\"\n    try:\n        time_now = datetime.now()\n        str_today = time_now.strftime('%Y%m%d')\n        ohlc = get_ohlc(code, window) # extract 120 + 16 days of data\n\n        if len(ohlc.index) < window:\n            return None, None, None, None\n\n        # if str_today == str(ohlc.iloc[0].name):\n        #     lastday = ohlc.iloc[1].name\n        # else:\n        #     lastday = ohlc.iloc[0].name\n        lastday = ohlc.iloc[0].name\n\n        closes = ohlc['close'].sort_index() \n        vols = ohlc['vol'].sort_index()\n\n        ma20 = closes.rolling(20).mean()\n        ma60 = closes.rolling(60).mean()\n        ma120 = closes.rolling(120).mean()\n        bf3d_m20 = ma20.loc[ohlc.iloc[3].name]\n        bf3d_m60 = ma60.loc[ohlc.iloc[3].name]\n        bf3d_m120 = ma120.loc[ohlc.iloc[3].name]\n        bf7d_m20 = ma20.loc[ohlc.iloc[7].name]\n        bf7d_m60 = ma60.loc[ohlc.iloc[7].name]\n        bf7d_m120 = ma120.loc[ohlc.iloc[7].name]\n        bf15d_m20 = ma20.loc[ohlc.iloc[15].name]\n        bf15d_m60 = ma60.loc[ohlc.iloc[15].name]\n        bf15d_m120 = ma120.loc[ohlc.iloc[15].name]\n        \n        if round(bf3d_m20, 2) > round(bf3d_m60, 2) and round(bf3d_m60, 2) > round(bf3d_m120, 2) \\\n            and round(bf7d_m20, 2) > round(bf7d_m60, 2) and round(bf7d_m60, 2) > round(bf7d_m120, 2) \\\n            and round(bf15d_m20, 2) > round(bf15d_m60, 2) and round(bf15d_m60, 2) > round(bf15d_m120, 2):\n\n            vol30arr = vols.tail(30).array # add the maximum volume of the last 30 days\n            return code, closes[lastday], vols[lastday], getVolMax(vol30arr)\n        else:\n            return None, None, None, None\n\n    except Exception as ex:\n        print(datetime.now().strftime('[%m/%d %H:%M:%S]'), 'get_movingavrg(' + str(window) + ') -> exception! ' + str(ex))\n    \n    return None, None, None, None
\n\nclass CMarketTotal():\n    def __init__(self):\n        self.dataInfo = {}\n        self.targetItems = {}\n        \n        self.targetItems['code'] = []\n        self.targetItems['name'] = []\n        self.targetItems['lastclose'] = []\n        self.targetItems['vol'] = []\n        self.targetItems['sprice'] = []\n        self.targetItems['lastmaxvol'] = []\n        \n    def get_target_items(self):\n        codeList = cpCodeMgr.GetStockListByMarket(1) # KOSPI (exchange)\n        codeList2 = cpCodeMgr.GetStockListByMarket(2) # KOSDAQ\n        allcodelist = codeList + codeList2\n        #print('All codes %d, KOSPI %d, KOSDAQ %d' % (len(allcodelist), len(codeList), len(codeList2)))\n        \n        objMarket = CpMarketEye()\n        rqCodeList = []\n        for i, code in enumerate(allcodelist):\n            rqCodeList.append(code)\n            if len(rqCodeList) == 200:\n                time.sleep(1)\n                objMarket.request(rqCodeList, self.dataInfo)\n                rqCodeList = []\n                continue\n        \n        if len(rqCodeList) > 0:\n            objMarket.request(rqCodeList, self.dataInfo)\n\n        # print(self.dataInfo)\n        for key in self.dataInfo.keys():\n            finalcode, close, vol, vol30max = get_movingaverage(key, 136)\n            if finalcode:\n                self.targetItems['code'].append(finalcode)\n                self.targetItems['name'].append(cpCodeMgr.CodeToName(finalcode))\n                self.targetItems['lastclose'].append(close)\n                self.targetItems['vol'].append(vol)\n                self.targetItems['lastmaxvol'].append(vol30max) \n\n        return self.targetItems\n        #slack.chat.post_message('#stock', ' '.join(self.targetItems))
\n\nclass CpMarketEye:\n    def __init__(self):\n        self.objRq = win32com.client.Dispatch(\"CpSysDib.MarketEye\")\n        self.RpFiledIndex = 0\n    \n    def request(self, codes, dataInfo):\n        # 0: stock code, 4: current price, 10: volume, 22: previous-day volume, 23: previous-day close\n        rqField = [0, 4, 10, 22, 23] # request fields\n        \n        self.objRq.SetInputValue(0, rqField) # request fields\n        self.objRq.SetInputValue(1, codes) # stock code or list of stock codes\n        self.objRq.BlockRequest()\n        \n        # fetch current prices and handle communication errors\n        rqStatus = self.objRq.GetDibStatus()\n        if rqStatus != 0:\n            return False\n        \n        cnt = self.objRq.GetHeaderValue(2) # 0: field count, 1: field name array, 2: stock count\n        \n        for i in range(cnt):\n            code = self.objRq.GetDataValue(0, i) # code\n            cur_price = self.objRq.GetDataValue(1, i) # current price\n            trade_amt = self.objRq.GetDataValue(2, i) # volume\n            bf_trade_amt = self.objRq.GetDataValue(3, i) # previous-day volume\n            bf_price = self.objRq.GetDataValue(4, i) # previous-day close\n\n            ## TODO : branch the logic depending on previous-day or same-day morning lookup\n\n            # 1. Surged 11% or more over the previous-day close\n            per = 0\n            if bf_price > 0:\n                per = ((cur_price - bf_price) / bf_price) * 100.0\n\n            # 2. Previous-day volume condition
\n            if trade_amt > 2000000 and per > 11.0:\n                dataInfo[code] = (cur_price, trade_amt)\n        \n        return True\n\n", "repo_name": "mongma-n/python-stock-toy-project-with-creon", "sub_path": "trade/collector.py", "file_name": "collector.py", "file_ext": "py", "file_size_in_byte": 6641, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "46", "api": [{"api_name": "win32com.client.client.Dispatch", "line_number": 8, "usage_type": "call"}, {"api_name": "win32com.client.client", "line_number": 8, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 8, "usage_type": "name"}, {"api_name": "win32com.client.client.Dispatch", "line_number": 9, "usage_type": "call"}, {"api_name": "win32com.client.client", "line_number": 9, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 9, "usage_type": "name"}, {"api_name": "win32com.client.client.Dispatch", "line_number": 10, "usage_type": "call"}, {"api_name": "win32com.client.client", "line_number": 10, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 10, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 109, "usage_type": "call"}, {"api_name": "win32com.client.client.Dispatch", "line_number": 132, "usage_type": "call"}, {"api_name": "win32com.client.client", "line_number": 132, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 132, "usage_type": "name"}]} {"seq_id": "37443926896", "text": "import openrouteservice\r\nimport folium\r\nimport random\r\nimport json\r\nimport pprint\r\nimport os\r\n\r\ndef rand_coord_in_range(x, y):\r\n    x_int = int(x * 10000000)\r\n    y_int = int(y * 10000000)\r\n    rand = random.randrange(min(x_int, y_int), max(x_int, y_int))\r\n    return rand / 10000000\r\n\r\n\r\nLONDON_BBOX = [[-81.398485, 43.006537], [-81.122701, 42.945282]]\r\n\r\n\"\"\"\r\nclient_local = openrouteservice.Client(base_url='http://localhost:5000') # Specify your personal API key\r\n\r\npois = client_local.places(request='pois', bbox=LONDON_BBOX, filter_category_ids=[596], validate=False)\r\n\r\nstation_locations = [station['geometry']['coordinates'] for station in pois['features']]\r\n\"\"\"\r\n\r\n# get locations and prices from pre-made json file\r\n\r\nprice_file = open('station_prices_reduced_network.json')\r\n\r\nprice_data = json.load(price_file)\r\n\r\nprint(str(len(price_data)))\r\n\r\nstation_locations = [station['coordinates'] for station in price_data]\r\n\r\ntrue_costco_index = next((z for z, st in enumerate(price_data) if st['price'] == 159.9), -1)\r\n\r\nclient_local_ors = openrouteservice.Client(base_url='http://localhost:8080/ors')\r\n\r\ndef simulate_best_station():\r\n    vehicle_category = random.random() # generate a random number between 0 and 1 and use tranches to determine the vehicle type\r\n\r\n    if vehicle_category < .17:\r\n        # Vehicle is a truck\r\n        BASE_FILL_L = 95 * 0.8\r\n        FUEL_BURN_L_PER_100KM = 14\r\n    elif vehicle_category < (.17 + .47):\r\n        # Vehicle is an SUV\r\n        BASE_FILL_L = 60 * 0.8\r\n        FUEL_BURN_L_PER_100KM = 9\r\n    elif vehicle_category < (.17 + .47 + .29):\r\n        # Vehicle is a car
0.8\r\n FUEL_BURN_L_PER_100KM = 7\r\n else:\r\n # Vehicle is a van\r\n BASE_FILL_L = 76 * 0.8\r\n FUEL_BURN_L_PER_100KM = 12\r\n\r\n starting_location = [rand_coord_in_range(LONDON_BBOX[0][0], LONDON_BBOX[1][0]), rand_coord_in_range(LONDON_BBOX[0][1], LONDON_BBOX[1][1])]\r\n\r\n temp_station_locations = station_locations.copy()\r\n temp_price_data = price_data.copy()\r\n\r\n removed_costco = False\r\n if random.random() < (1.0 if os.getenv('REMOVE_COSTCO', False) else 0.5):\r\n temp_station_locations.pop(true_costco_index)\r\n temp_price_data.pop(true_costco_index)\r\n removed_costco = True\r\n\r\n locations = [starting_location] + temp_station_locations\r\n\r\n matrix = client_local_ors.distance_matrix(locations=locations, destinations=list(range(1, len(locations))), sources=[0], profile='driving-car', metrics=['distance'], validate=False)\r\n\r\n closest_station_index = matrix['distances'][0].index(min(matrix['distances'][0]))\r\n\r\n cost_matrix = []\r\n fuel_burn_matrix = []\r\n\r\n for i in range(len(temp_price_data)):\r\n station = temp_price_data[i]\r\n base_cost = BASE_FILL_L * station['price']\r\n on_route_fuel_burn = matrix['distances'][0][i] / 1000 / 100 * FUEL_BURN_L_PER_100KM * 2 # the distance is in meters so we convert to km, then we see what portion of 100 that is multiplied by fuel burn. We multiply by 2 to consider the return fuel burn\r\n on_route_fuel_cost = on_route_fuel_burn * station['price']\r\n total_fuel_cost = on_route_fuel_cost + base_cost\r\n cost_matrix.append(total_fuel_cost)\r\n fuel_burn_matrix.append(on_route_fuel_burn + BASE_FILL_L)\r\n\r\n lowest_cost_station_index = cost_matrix.index(min(cost_matrix))\r\n\r\n # note that the map will show the last route decision where this function was run multiple times\r\n # we use reversed here to reverse coords because the map uses lat, lon and the rest of the apis use lon,lat\r\n m = folium.Map(location=list(reversed(starting_location)))\r\n\r\n folium.Marker(location=list(reversed(starting_location)), popup=\"starting point\", icon=folium.Icon(color=\"green\")).add_to(m)\r\n\r\n folium.Marker(location=list(reversed(temp_station_locations[closest_station_index])), popup=\"Nearest station \\n Price: \" + str(temp_price_data[closest_station_index]['price']), icon=folium.Icon(color=\"blue\")).add_to(m)\r\n folium.Marker(location=list(reversed(temp_station_locations[lowest_cost_station_index])), popup=\"Cheapest station \\n Price: \" + str(temp_price_data[lowest_cost_station_index]['price']), icon=folium.Icon(color=\"red\")).add_to(m)\r\n\r\n\r\n m.save('map.html')\r\n\r\n return { 'nearest_price': temp_price_data[closest_station_index]['price'], 'cheapest_price': temp_price_data[lowest_cost_station_index]['price'], 'additional_burn_l': fuel_burn_matrix[lowest_cost_station_index] - fuel_burn_matrix[closest_station_index], 'total_savings': (cost_matrix[closest_station_index] - cost_matrix[lowest_cost_station_index]) / 100, 'nearest_is_best': closest_station_index == lowest_cost_station_index, 'costco_is_best': lowest_cost_station_index == true_costco_index and not removed_costco }\r\n\r\ntotal_additional_burn = 0\r\ntotal_savings = 0\r\nnearest_best = 0\r\ncostco_best = 0\r\n\r\nTOTAL_RUNS = 10000\r\n\r\nfor run in range(TOTAL_RUNS):\r\n result = simulate_best_station()\r\n total_additional_burn += result['additional_burn_l']\r\n total_savings += result['total_savings']\r\n if result['nearest_is_best']:\r\n nearest_best += 1\r\n if result['costco_is_best']:\r\n costco_best += 1\r\n\r\nprint('The total additional fuel 
burn was ' + str(total_additional_burn) + ' which saved consumers $' + str(round(total_savings, 2)) + ' a lower-priced station was optimal for ' + str(round((1 - (nearest_best / TOTAL_RUNS)) * 100, 1)) + ' % of drivers. ' + str(round(costco_best / TOTAL_RUNS * 100, 1)) + ' % of drivers would choose costco.')\r\n", "repo_name": "rye761/GasSim", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 5536, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "46", "api": [{"api_name": "random.randrange", "line_number": 11, "usage_type": "call"}, {"api_name": "json.load", "line_number": 29, "usage_type": "call"}, {"api_name": "openrouteservice.Client", "line_number": 37, "usage_type": "call"}, {"api_name": "random.random", "line_number": 40, "usage_type": "call"}, {"api_name": "random.random", "line_number": 65, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 65, "usage_type": "call"}, {"api_name": "folium.Map", "line_number": 92, "usage_type": "call"}, {"api_name": "folium.Marker", "line_number": 94, "usage_type": "call"}, {"api_name": "folium.Icon", "line_number": 94, "usage_type": "call"}, {"api_name": "folium.Marker", "line_number": 96, "usage_type": "call"}, {"api_name": "folium.Icon", "line_number": 96, "usage_type": "call"}, {"api_name": "folium.Marker", "line_number": 97, "usage_type": "call"}, {"api_name": "folium.Icon", "line_number": 97, "usage_type": "call"}]} {"seq_id": "16080802316", "text": "from os import stat\nfrom fastapi import Request\nfrom fastapi import APIRouter\nfrom SQL.BankDBManager import BankDBManager\nfrom dto.Transaction import Transaction\nfrom datetime import datetime\nfrom fastapi import FastAPI, HTTPException\nbank_db_manager = BankDBManager()\n\nrouter = APIRouter(\n prefix=\"/transactions\",\n tags=[\"transactions\"]\n)\n\n\n@router.get('/balance')\ndef get_balance():\n balance_as_array = bank_db_manager.get_balance()\n return balance_as_array[0]\n\n@router.get('/')\ndef get_transactions(category=\"\", date=\"\", amount=\"\"):\n tranactions_from_db = bank_db_manager.get_transactions()\n list_of_all_transactions: list[Transaction] = [\n Transaction(**res) for res in tranactions_from_db]\n\n if category != \"\":\n list_of_all_transactions = [\n t for t in list_of_all_transactions if t.category == category]\n\n if amount != \"\":\n list_of_all_transactions = [\n t for t in list_of_all_transactions if t.amount >= int(amount)]\n\n if date != \"\":\n list_of_all_transactions = [\n t for t in list_of_all_transactions if t.tr_date >= datetime.strptime(date, '%Y-%m-%d').date()]\n\n return list_of_all_transactions\n\n\n@router.post('/')\nasync def add_transaction(request: Request):\n transaction: Transaction = Transaction(**(await request.json()))\n bank_db_manager.add_new_transaction(transaction)\n return transaction\n\n\n@router.delete('/{transactionID}')\nasync def delete_transaction(transactionID: int):\n bank_db_manager.delete_transaction(transactionID)\n", "repo_name": "207Levy/Bank", "sub_path": "Server/router/transaction_route.py", "file_name": "transaction_route.py", "file_ext": "py", "file_size_in_byte": 1545, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "SQL.BankDBManager.BankDBManager", "line_number": 8, "usage_type": "call"}, {"api_name": "fastapi.APIRouter", "line_number": 10, "usage_type": "call"}, {"api_name": "dto.Transaction.Transaction", "line_number": 24, 
"usage_type": "name"}, {"api_name": "dto.Transaction.Transaction", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "name"}, {"api_name": "fastapi.Request", "line_number": 43, "usage_type": "name"}, {"api_name": "dto.Transaction.Transaction", "line_number": 44, "usage_type": "name"}]} {"seq_id": "40533504958", "text": "import tkinter as tk\nimport vlc\n\nclass Screen(tk.Frame):\n def __init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent, bg = 'black')\n self.settings = { # Inizialazing dictionary settings\n \"width\" : 1024,\n \"height\" : 576\n }\n self.settings.update(kwargs) # Changing the default settings\n # Open the video source |temporary\n self.video_source = \"./Assets/male_5_10.mp4\"\n\n # Canvas where to draw video output\n self.canvas = tk.Canvas(self, width = self.settings['width'], height = self.settings['height'], bg = \"black\", highlightthickness = 0)\n self.canvas.pack()\n\n # Creating VLC player\n self.instance = vlc.Instance()\n self.player = self.instance.media_player_new()\n\n\n def GetHandle(self):\n # Getting frame ID\n return self.winfo_id()\n\n def play(self, _source):\n # Function to start player from given source\n Media = self.instance.media_new(_source)\n Media.get_mrl()\n self.player.set_media(Media)\n\n #self.player.play()\n self.player.set_hwnd(self.GetHandle())\n self.player.play()", "repo_name": "asmanjitha/Computer_Aided_Smart_Adverticements", "sub_path": "GUI_Interactive_App/VideoScreen.py", "file_name": "VideoScreen.py", "file_ext": "py", "file_size_in_byte": 1171, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "tkinter.Frame", "line_number": 4, "usage_type": "attribute"}, {"api_name": "tkinter.Frame.__init__", "line_number": 6, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tkinter.Canvas", "line_number": 16, "usage_type": "call"}, {"api_name": "vlc.Instance", "line_number": 20, "usage_type": "call"}]} {"seq_id": "32925431759", "text": "from lib.io import *\nfrom lib.file import *\nfrom lib.hue import *\nfrom requests import get\nfrom urllib import parse\nfrom json import loads\nimport time\nendl = \"\\n\"\n\nclass dork:\n\n\tdef help():\n\t\tprint(\"\")\n\t\tio.row(25, [\"dork -h\", \"Attributes list for this command.\"])\n\t\tio.row(25, [\"dork -l\", \"List all available dork files.\"])\n\t\tio.row(25, [\"dork -r [FILE]\", \"List all dorks within a file.\"])\n\t\tio.row(25, [\"dork -u [FILE] [ID]\", \"Use dork within a file.\"])\n\t\tprint(\"\")\n\n\tdef list():\n\t\tfiles = file.list(\"dorks\")\n\t\tif len(files) != 0:\n\t\t\tprint(\"\")\n\t\t\tfor f in files:\n\t\t\t\tprint(f[0:-4])\n\t\t\tprint(\"\")\n\t\telse:\n\t\t\tio.error(\"There ain't files to show up.\")\n\n\tdef read(f):\n\t\tcontent = file.readfile(\"dorks/\" + f + \".txt\")\n\t\tif content != False:\n\t\t\tif len(content) != 0:\n\t\t\t\tprint(\"\")\n\t\t\t\tio.row(5, [fore.blue + \"ID\", \"DORK\"])\n\t\t\t\ti = 1\n\t\t\t\tfor d in content:\n\t\t\t\t\tio.row(5, [fore.yellow + str(i), fore.white + d])\n\t\t\t\t\tio.prevline(1)\n\t\t\t\t\ti += 1\n\t\t\t\tprint(endl)\n\t\t\telse: io.error(\"There ain't dorks to show up.\")\n\t\telse: io.error(\"File doesn't exist.\")\n\n\tdef search(query):\n\t\t_google = \"https://www.googleapis.com/customsearch/v1\"\n\t\t_params = 
{\n\t\t\t\"key\": \"AIzaSyANm8farYg7FBUl49FRSMDURp4a7VDEyEY\",\n\t\t\t\"cx\": \"017576662512468239146:omuauf_lfve\",\n\t\t\t\"q\": query\n\t\t}\n\t\t_data = get(_google, params=_params).text\n\t\t_data = loads(_data)\n\t\ttry:\n\t\t\tfor item in _data[\"items\"]:\n\t\t\t\tprint(item[\"link\"])\n\t\t\tprint(\"\")\n\t\t\treturn True\n\t\texcept: return False\n\n\tdef use(f, n):\n\t\tcontent = file.readfile(\"dorks/\" + f + \".txt\")\n\t\tif content != False:\n\t\t\tif len(content) != 0:\n\t\t\t\ti = 1\n\t\t\t\tfor _dork in content:\n\t\t\t\t\tif n == i:\n\t\t\t\t\t\tio.quote(\"Dork: \" + fore.magenta + _dork + fore.white)\n\t\t\t\t\t\tio.prevline(1)\n\t\t\t\t\t\tif not dork.search(_dork):\n\t\t\t\t\t\t\tio.prevline(2)\n\t\t\t\t\t\t\tio.error(\"No results gathered.\")\n\t\t\t\t\t\ti = -1\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse: i += 1\n\t\t\t\tif i != -1: io.error(\"Doesn't exist dork with given ID.\")\n\t\t\telse: io.error(\"This file doesn't contain any dorks to use.\")\n\t\telse: io.error(\"File doesn't exist.\")\n\n\tdef all(f):\n\t\tcontent = file.readfile(\"dorks/\" + f + \".txt\")\n\t\tif content != False:\n\t\t\tif len(content) != 0:\n\t\t\t\tio.quote(\"Gathering dorks data...\")\n\t\t\t\ti = 0\n\t\t\t\tfor _dork in content:\n\t\t\t\t\tif dork.search(_dork): i += 1\n\t\t\t\t\ttime.sleep(5)\n\t\t\t\tif i == 0:\n\t\t\t\t\tio.prevline(2)\n\t\t\t\t\tio.error(\"No results gathered.\")\n\t\t\telse: io.error(\"This file doesn't contain any dorks to use.\")\n\t\telse: io.error(\"File doesn't exist.\")", "repo_name": "skollprog/tochi", "sub_path": "lib/dork.py", "file_name": "dork.py", "file_ext": "py", "file_size_in_byte": 2404, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "requests.get", "line_number": 52, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 53, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 88, "usage_type": "call"}]} {"seq_id": "6121768640", "text": "#pip install slack_sdk, aiohttp, selenium, bs4, requests\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport json\r\n\r\ndef get_asin(url):\r\n asin = url.split('/')\r\n for i, dp in enumerate(asin):\r\n if dp == \"dp\":\r\n return (asin[i+1])\r\n\r\ndef get_domain(url):\r\n uri = url.split('/')\r\n for i in uri:\r\n if \"amazon\" in i:\r\n return i\r\n print(\"not supported, amazon only\")\r\n exit()\r\n\r\ndef get_itemcode(url):\r\n return get_asin(url)\r\n\r\ndef getprice_amazon(parse_html):\r\n parse_price = parse_html.find(id=\"twister-plus-price-data-price\")\r\n parse_curr = parse_html.find(id=\"twister-plus-price-data-price-unit\")\r\n\r\n if parse_price == None:\r\n item_price = \"none found\"\r\n currency = \"none found\"\r\n else:\r\n item_price = parse_price.get(\"value\")\r\n currency = parse_curr.get(\"value\")\r\n\r\n return item_price, currency\r\n\r\ndef read_html(html_file):\r\n with open(html_file, \"r\", encoding=\"utf-8\") as f:\r\n content = f.read()\r\n\r\n parse_html = BeautifulSoup(content,\"html.parser\", multi_valued_attributes=None)\r\n item_price, currency = getprice_amazon(parse_html)\r\n\r\n return item_price, currency\r\n\r\n\r\ndef send_request(s, prepared):\r\n response = s.send(prepared)\r\n return response\r\n\r\ndef write_response_tofile(response, item_code):\r\n html_file = \"ASIN - \" + item_code + \".html\"\r\n\r\n with open(html_file,\"w\", encoding=\"utf-8\") as f:\r\n f.write(response.text)\r\n return html_file\r\n\r\ndef pretty_print_POST(req):\r\n 
print('{}\\n{}\\r\\n{}\\r\\n\\r\\n{}'.format(\r\n '-----------START-----------',\r\n req.method + ' ' + req.url,\r\n '\\r\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\r\n req.body,\r\n ))\r\n\r\ndef getprice(url):\r\n domain = get_domain(url)\r\n item_code = get_itemcode(url)\r\n\r\n headers = {'authority' :'www.amazon.com', 'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36'}\r\n req = requests.Request(\"GET\", url, headers=headers)\r\n prepared = req.prepare()\r\n s = requests.Session()\r\n pretty_print_POST(prepared)\r\n\r\n response = send_request(s, prepared)\r\n #print(response.text)\r\n #print(response.status_code)\r\n html_file = write_response_tofile(response, item_code)\r\n price_curr = read_html(html_file)\r\n price = price_curr[0]\r\n currency = price_curr[1]\r\n price_curr_json = {\"price\": price,\r\n \"currency\": currency,\r\n \"item\": item_code}\r\n return price_curr_json", "repo_name": "marcus081c/pricecheck", "sub_path": "getpriceamazon.py", "file_name": "getpriceamazon.py", "file_ext": "py", "file_size_in_byte": 2560, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "46", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.Request", "line_number": 71, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 73, "usage_type": "call"}]} {"seq_id": "26330479726", "text": "from datetime import datetime\nfrom flask import render_template, session, redirect, url_for\n\nfrom . import main\nfrom .forms import NewBlogForm, EditContentForm, EditBlogForm, ContentForm\nfrom .. import db\nfrom ..models import Blog, BlogContent\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n blogs = Blog.query.order_by(Blog.timestamp.desc()).all()\n return render_template('index.html', blogs=blogs, current_time=datetime.utcnow(),\n name=session.get('name'), text=session.get('text'))\n\n\n@main.route('/blogs/new', methods=['GET', 'POST'])\ndef new_blog():\n blogform = NewBlogForm()\n if blogform.validate_on_submit():\n blog = Blog(title=blogform.title.data,\n body=blogform.body.data,\n timestamp=datetime.utcnow()\n )\n db.session.add(blog)\n return redirect(url_for('.index'))\n return render_template('newblog.html', blogform=blogform, current_time=datetime.utcnow())\n\n\n@main.route('/blogs/', methods=['GET', 'POST'])\ndef blog(id):\n content_form = ContentForm()\n blog = Blog.query.get_or_404(id)\n session['id'] = blog.id\n if content_form.validate_on_submit():\n new_content = BlogContent(body=content_form.body.data,\n blog=blog,\n timestamp=datetime.utcnow())\n db.session.add(new_content)\n return redirect(url_for('.blog', id=blog.id))\n return render_template('blog.html', blog=blog, content_form=content_form)\n\n\n@main.route('/edit/', methods=['GET', 'POST'])\ndef edit_blog(id):\n blog = Blog.query.get_or_404(id)\n form = EditBlogForm()\n if form.validate_on_submit():\n blog.title = form.title.data\n blog.body = form.body.data\n return redirect(url_for('.blog', id=blog.id))\n form.title.data = blog.title\n form.body.data = blog.body\n return render_template('edit_blog.html', blog=blog, form=form)\n\n\n@main.route('/edit/contents/', methods=['GET', 'POST'])\ndef edit_content(id):\n content = BlogContent.query.get_or_404(id)\n form = EditContentForm()\n if form.validate_on_submit():\n content.body = form.body.data\n return redirect(url_for('.blog', 
id=session.get('id')))\n form.body.data = content.body\n return render_template('edit_blog.html', content=content, form=form)\n\n\n@main.route('/delete/content/')\ndef delete_content(id):\n content = BlogContent.query.get_or_404(id)\n db.session.delete(content)\n\n return redirect(url_for('.blog', id=session.get('id')))\n\n\n@main.route('/delete/blog/')\ndef delete_blog(id):\n blog = Blog.query.get_or_404(id)\n db.session.delete(blog)\n\n return redirect(url_for('.index'))", "repo_name": "JunliuHub/MyBlog", "sub_path": "src/main/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2748, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "models.Blog.query.order_by", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Blog.query", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 12, "usage_type": "name"}, {"api_name": "models.Blog.timestamp.desc", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Blog.timestamp", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 14, "usage_type": "name"}, {"api_name": "forms.NewBlogForm", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Blog", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "forms.ContentForm", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Blog.query.get_or_404", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Blog.query", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 34, "usage_type": "name"}, {"api_name": "models.BlogContent", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Blog.query.get_or_404", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Blog.query", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 46, "usage_type": "name"}, {"api_name": "forms.EditBlogForm", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 51, "usage_type": "call"}, {"api_name": 
"flask.render_template", "line_number": 54, "usage_type": "call"}, {"api_name": "models.BlogContent.query.get_or_404", "line_number": 59, "usage_type": "call"}, {"api_name": "models.BlogContent.query", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.BlogContent", "line_number": 59, "usage_type": "name"}, {"api_name": "forms.EditContentForm", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 65, "usage_type": "call"}, {"api_name": "models.BlogContent.query.get_or_404", "line_number": 70, "usage_type": "call"}, {"api_name": "models.BlogContent.query", "line_number": 70, "usage_type": "attribute"}, {"api_name": "models.BlogContent", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 73, "usage_type": "name"}, {"api_name": "models.Blog.query.get_or_404", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Blog.query", "line_number": 78, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 81, "usage_type": "call"}]} {"seq_id": "12089140908", "text": "#!/usr/bin/python3\nimport socket\nimport threading\nimport json\nimport time\nimport traceback\nimport subprocess\nimport os\nimport sys\nimport hashlib\nimport ctypes\n\ndatadir = '/mnt/var'\nbaseurl = 'https://yoomoney.ru/'\n\nlibx11=ctypes.CDLL('libX11.so.6')\nlibxtst=ctypes.CDLL('libXtst.so.6')\ndis=libx11.XOpenDisplay(None)\n\ndef kpress(kcode, d=1, u=1):\n\tsy = libx11.XFlush\n\tif d: libxtst.XTestFakeKeyEvent(dis, kcode, True, 0)\n\tsy(dis)\n\tif u: libxtst.XTestFakeKeyEvent(dis, kcode, False, 0)\n\tsy(dis)\n\nif 'early' in sys.argv:\n\ttime.sleep(10)\n\tkpress(24)\n\tkpress(39)\n\tkpress(36)\n\ttime.sleep(50)\n\t#kpress(71)\n\t#time.sleep(3)\n\tkpress(36)\n\t#time.sleep(5)\n\tkpress(37, 1, 0)\n\tkpress(64, 1, 0)\n\tkpress(28)\n\tkpress(37, 0, 1)\n\tkpress(64, 0, 1)\n\ttime.sleep(15)\n\tkpress(56)\n\tkpress(28)\n\tkpress(36)\n\texit()\n\ntoken1 = '\\r\\nrqbxmvJKSsNevlZDlTkiBktCVNdYWp\\r\\n\\r\\n'\ntoken2 = '/QINdbNFKeGGUmeAnbRhksGOTTZrATR'\n\nquery = []\npayed = []\nbad = []\n\nclass st:\n\twindow = None\n\tcapture = None\n\tmywallet = None\n\tpaymethod = None\n\tid = None\n\tdetails = None\n\tamount = None\n\trenew = False\n\ndef popcode():\n\tyfile = '/mnt/uc/' + st.mywallet\n\tcl = open(yfile).readlines()\n\tc = cl[0].split(' ')[1].strip()\n\topen(yfile, 'w').writelines(cl[1:])\n\tprint(len(cl), 'codes')\n\tst.renew = len(cl) < 7\n\treturn c\n\ndef timer():\n\twhile True:\n\t\ttry:\n\t\t\t#if st.id and st.enter:\n\t\t\t#\ttime.sleep(3)\n\t\t\t#\tif st.id and st.enter:\n\t\t\t#\t\tkpress(71)\n\t\t\t#\t\ttime.sleep(2)\n\t\t\t#\t\tkpress(36)\n\t\t\t#\t\ttime.sleep(3)\n\t\t\t#\tif st.id and st.enter:\n\t\t\t#\t\tkpress(36)\n\t\t\t#\t\ttime.sleep(2)\n\t\t\t#\ttime.sleep(5)\n\t\t\tprocess()\n\t\t\tif st.window and st.window.poll() is not None:\n\t\t\t\tst.window = 
None\n\t\t\t\tfinish()\n\t\texcept:\n\t\t\tpass\n\t\ttime.sleep(2)\n\nthreading.Thread(target=timer).start()\n\ndef screenrec(id):\n\tos.system('xscreensaver-command -deactivate')\n\tst.capture = subprocess.Popen(['/usr/bin/ffmpeg', '-f', 'x11grab', '-draw_mouse', '1', '-framerate', '25', '-video_size', '1366x768',\n\t\t'-i', ':0+0,0', '-pix_fmt', 'yuv420p', '-c:v', 'libx264', '-preset', 'veryfast', '-q:v', '1', '-s', '1366x768', '-f', 'matroska', \n\t\t'-v', '-8', '/mnt/screenrec/'+id+'.mkv'])\n\ndef process():\n\tif st.id or not query: return\n\tq = query.pop()\n\ta, st.mywallet, st.paymethod, st.id, st.details, st.amount = q\n\tif int(float(st.amount)) == float(st.amount):\n\t\tst.amount = str(int(float(st.amount)))\n\tif os.access(datadir + '/kpress', os.R_OK):\n\t\tkpress(int(open(datadir + '/kpress').read()))\n\t\ttime.sleep(1)\n\tst.window = subprocess.Popen(['xmessage']+q)\n\tbrowser = 'c' + st.mywallet\n\tif st.paymethod == 'billing':\n\t\tsubprocess.Popen([browser, baseurl+'main'])\n\telse:\n\t\tscreenrec(st.id)\n\t\tsubprocess.Popen([browser, {'wallet': baseurl+'transfer/a2w', 'phone': baseurl+'phone'}[st.paymethod]])\n\tst.enter = True\n\ndef finish(renew=True):\n\tif renew and st.renew:\n\t\tbrowser = 'c' + st.mywallet\n\t\tsubprocess.Popen([browser, baseurl+'emergency-codes'])\n\telse:\n\t\tst.id = None\n\t\tst.details = None\n\t\ttry:\n\t\t\tif st.window:\n\t\t\t\tst.window.terminate()\n\t\t\t\tst.window = None\n\t\t\tst.capture.terminate()\n\t\texcept:\n\t\t\tpass\n\t\tprint('finish')\n\t\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ns.bind(('127.0.0.1', 22222))\ns.listen(1)\nwhile True:\n\ttry:\n\t\tres = ''\n\t\tc, a = s.accept()\n\t\tc.settimeout(5)\n\t\treq = b''\n\t\twhile b'\\r\\n\\r\\n' not in req:\n\t\t\tp = c.recv(8192)\n\t\t\tif not p: break\n\t\t\treq += p\n\t\tq = req.decode()\n\t\t\n\t\tif token1 in q:\n\t\t\tq = q.replace(token1, '').split('\\t')\n\t\t\t\n\t\t\tif q[0] == 'new':\n\t\t\t\tquery.insert(0, q)\n\t\t\t\n\t\t\tif q[0] == 'search':\n\t\t\t\tif len(q) == 3:\n\t\t\t\t\tquery.append(['new', q[2], 'billing', '-1', '0', '0'])\n\t\t\t\telse:\n\t\t\t\t\tif q[1] in payed:\n\t\t\t\t\t\tres = 'READY'\n\t\t\t\t\tif q[1] in bad:\n\t\t\t\t\t\tres = 'bad'\n\t\t\n\t\tif token2 in q:\n\t\t\twhile b'' not in req:\n\t\t\t\tp = c.recv(8192)\n\t\t\t\tif not p: break\n\t\t\t\treq += p\n\t\t\tq = json.loads(req.decode().split('\\r\\n\\r\\n')[1].replace('', ''))\n\t\t\tif q['action'] == 'details' and st.id and q['paymethod'] == st.paymethod:\n\t\t\t\tres = {'details': st.details, 'amount': st.amount}\n\t\t\t\n\t\t\tif q['action'] == 'getcode' and st.id:\n\t\t\t\tif float(q['amount'].replace('\\xa0', '').replace('\\t', '').strip()) <= float(st.amount)*1.03 and q['details'].strip() in st.details:\n\t\t\t\t\tprint('MATCH')\n\t\t\t\t\tres = {'ok': 1, 'c': popcode()}\n\t\t\t\t\tst.details = st.amount = None\n\t\t\t\telse:\n\t\t\t\t\tprint(q)\n\t\t\t\n\t\t\tif q['action'] == 'confirm' and st.id and not st.details:\n\t\t\t\tpayed.append(st.id)\n\t\t\t\tres = {'closed': 1, \n\t\t\t\t\t'redir': os.access(datadir + '/actions-' + st.mywallet, os.R_OK) and len(open(datadir + '/actions-' + st.mywallet).read())}\n\t\t\t\tfinish()\n\t\t\t\t\n\t\t\tif q['action'] == 'bad':\n\t\t\t\tbad.append(st.id)\n\t\t\t\tres = {'saved': 1}\n\t\t\t\tfinish('codeused' in q)\n\t\t\t\n\t\t\tif q['action'] == 'acode':\n\t\t\t\tres = {'c': popcode()}\n\t\t\t\n\t\t\tif q['action'] == 'savecodes':\n\t\t\t\tyfile = '/mnt/uc/' + 
st.mywallet\n\t\t\t\topen(yfile, 'w').write(q['content'])\n\t\t\t\tres = {'saved': 1}\n\t\t\t\tos.system('e7z /mnt/uc; uz &')\n\t\t\t\tfinish(False)\n\t\t\t\topen(datadir + '/codes-' + st.mywallet, 'w').write(q['content'])\n\t\t\t\n\t\t\tif q['action'] == 'loaded':\n\t\t\t\tst.enter = False\n\t\t\t\tres = 1\n\t\t\t\tprint('loaded')\n\t\t\t\n\t\t\tif 'bal' in q and q['mywallet'].isdigit():\n\t\t\t\trv = 0\n\t\t\t\tw = q['mywallet']\n\t\t\t\tif os.access(datadir + '/actions-' + w, os.R_OK):\n\t\t\t\t\trv = 1\n\t\t\t\t\tassert q['bal'] is not None\n\t\t\t\t\topen(datadir + '/balance-' + w, 'w').write(str(q['bal']))\n\t\t\t\t\told = open(datadir + '/actions-' + w).read().split('\\n')\n\t\t\t\t\tnewact = ''# if ''.join(old) else q['action']\n\t\t\t\t\twhile old:\n\t\t\t\t\t\tif not old[-1].strip():\n\t\t\t\t\t\t\told.pop()\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tcan = (q['action']+'\\r\\r').replace('\\n'.join(old)+'\\n\\r\\r', '')\n\t\t\t\t\t\tif '\\r\\r' not in can:\n\t\t\t\t\t\t\tnewact = can\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\told.pop()\n\t\t\t\t\topen(datadir + '/actions-' + w, 'w').write(q['action'])\n\t\t\t\t\tfor act in newact.split('\\n'):\n\t\t\t\t\t\tprint(act)\n\t\t\t\t\t\tif not act: continue\n\t\t\t\t\t\tfn = hashlib.sha256(act.encode()).hexdigest() + '.eml'\n\t\t\t\t\t\tact = act.split('\\t')\n\t\t\t\t\t\tamo = str(float(act[1]))\n\t\t\t\t\t\tif act[0] == 'in':\n\t\t\t\t\t\t\topen(datadir + '/incoming-' + w + '/' + fn, 'w').write('in\\n'+amo)\n\t\t\t\t\t\tif act[0] == 'out':\n\t\t\t\t\t\t\topen(datadir + '/transactions-' + w + '/' + fn, 'w').write('out\\n'+amo)\n\t\t\t\t\t\n\t\t\t\tres = {'saved': rv}\n\t\t\t\tif st.paymethod == 'billing' and st.mywallet == w:\n\t\t\t\t\tfinish(False)\n\t\t\n\t\tif res: res = json.dumps(res)\n\t\tc.sendall(('HTTP/1.1 200 OK\\r\\nAccess-Control-Allow-Origin: *\\r\\nConnection: close\\r\\nContent-Length: '+str(len(res))+\n\t\t\t'\\r\\n\\r\\n'+res).encode())\n\t\tc.close()\n\texcept Exception:\n\t\ttraceback.print_exc()\n", "repo_name": "AsgardB/parovoz", "sub_path": "apm/listen.py", "file_name": "listen.py", "file_ext": "py", "file_size_in_byte": 6431, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "ctypes.CDLL", "line_number": 16, "usage_type": "call"}, {"api_name": "ctypes.CDLL", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 96, "usage_type": "call"}, {"api_name": "os.system", "line_number": 99, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 100, "usage_type": "call"}, {"api_name": "os.access", "line_number": 110, "usage_type": "call"}, {"api_name": "os.R_OK", "line_number": 110, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 112, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 113, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 116, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 119, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 125, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 138, "usage_type": "call"}, {"api_name": 
"socket.AF_INET", "line_number": 138, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 138, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 139, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 139, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 174, "usage_type": "call"}, {"api_name": "os.access", "line_number": 189, "usage_type": "call"}, {"api_name": "os.R_OK", "line_number": 189, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 204, "usage_type": "call"}, {"api_name": "os.access", "line_number": 216, "usage_type": "call"}, {"api_name": "os.R_OK", "line_number": 216, "usage_type": "attribute"}, {"api_name": "hashlib.sha256", "line_number": 235, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 247, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 252, "usage_type": "call"}]} {"seq_id": "26415077614", "text": "import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\nexcel_file = \"血常规数据整合.xlsx\"\r\ndata = pd.read_excel(excel_file)\r\n\r\nX = data[['WBC', 'LY','GR','MO','RBC','Hgb','HCT','MCV','MCH','RDW','PLT','PCT','MPV','PDW']].values\r\nY = data['result'].values\r\n\r\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=50)\r\n\r\nk = 10\r\nclsf = KNeighborsClassifier(n_neighbors=k)\r\nclsf.fit(X_train,Y_train)\r\n\r\nY_pred = clsf.predict(X_test)\r\nprint(Y_pred)\r\nprint(Y_test)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nacc = accuracy_score(Y_test,Y_pred)\r\nprint(acc)\r\n", "repo_name": "KaFuuchao0313/BloodExamProject", "sub_path": "py/KNN_basic.py", "file_name": "KNN_basic.py", "file_ext": "py", "file_size_in_byte": 689, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "pandas.read_excel", "line_number": 7, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 23, "usage_type": "call"}]} {"seq_id": "16852461173", "text": "from numpy import array\nfrom os import listdir\nfrom pickle import load\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical, plot_model\nfrom keras.models import Model, load_model\nfrom keras.layers import Input, Dense, LSTM, Embedding, Dropout\nfrom keras.layers.merge import add\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.applications.vgg16 import preprocess_input\n\ndef load_doc(filename):\n with open(filename, 'r') as f:\n text = f.read()\n return text\n\ndef load_set(filename):\n doc = load_doc(filename)\n dataset = set()\n for line in doc.split('\\n'):\n if len(line) < 1:\n continue\n key = line.split('.')[0]\n dataset.add(key)\n return dataset\n\n# load clean descriptions into memory\ndef load_clean_descriptions(filename, dataset):\n doc = load_doc(filename)\n texts = {}\n for line in doc.split('\\n'):\n if len(line) == 0:\n continue\n tokens = line.split()\n image_id, text = tokens[0], tokens[1:]\n if image_id in dataset:\n desc = 'BOS ' + ' '.join(text) + ' EOS'\n 
texts.setdefault(image_id, []).append(desc)\n return texts\n\n#load photo features\ndef load_photo_features(filename, dataset):\n with open(filename, 'rb') as f:\n all_features = load(f)\n features = {key: all_features[key] for key in dataset}\n return features\n\n# convert a dictionary of clean descriptions to list\ndef to_lines(descriptions):\n all_desc = []\n for key in descriptions.keys():\n all_desc += descriptions[key]\n return all_desc\n\n# fit a tokenizer given caption descriptions\ndef create_tokenizer(descriptions):\n lines = to_lines(descriptions)\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(lines)\n return tokenizer\n\ndef max_length(descriptions):\n lines = to_lines(descriptions)\n return max(map(lambda d: len(d.split()), lines))\n\ndef create_sequences(tokenizer, max_length, descriptions, photos):\n X1, X2, y = list(), list(), list()\n # walk through each image identifier\n for key, desc_list in descriptions.items():\n # walk through each description for the image\n for desc in desc_list:\n # encode the sequence\n seq = tokenizer.texts_to_sequences([desc])[0]\n # split one sequence into multiple X,y pairs\n for i in range(1, len(seq)):\n # split into input and output pair\n in_seq, out_seq = seq[:i], seq[i]\n # pad input sequence\n in_seq = pad_sequences([in_seq], maxlen=max_length)[0]\n # encode output sequence\n out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]\n # store\n X1.append(photos[key][0])\n X2.append(in_seq)\n y.append(out_seq)\n return array(X1), array(X2), array(y)\n\ndef create_sequences_single(tokenizer, max_length, desc_list, photo):\n X1, X2, y = [], [], []\n vocab_size = len(tokenizer.word_index)+1\n for desc in desc_list:\n seq = tokenizer.texts_to_sequences([desc])[0]\n for i in range(1, len(seq)):\n in_seq, out_seq = seq[:i], seq[i]\n in_seq = pad_sequences([in_seq], maxlen = max_length)[0]\n out_seq = to_categorical([out_seq], num_classes = vocab_size)[0]\n X1.append(photo[0])\n X2.append(in_seq)\n y.append(out_seq)\n return [array(X1), array(X2), array(y)]\n\ndef define_model(vocab_size, max_length, filename = None):\n # feature extractor model\n inputs1 = Input(shape=(4096,))\n fe1 = Dropout(0.5)(inputs1)\n fe2 = Dense(256, activation='relu')(fe1)\n # sequence model\n inputs2 = Input(shape=(max_length,))\n se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)\n se2 = Dropout(0.5)(se1)\n se3 = LSTM(256)(se2)\n\n # decoder model\n decoder1 = add([fe2, se3])\n decoder2 = Dense(256, activation='relu')(decoder1)\n outputs = Dense(vocab_size, activation='softmax')(decoder2)\n \n \n # tie it together [image, seq] [word]\n model = Model(inputs=[inputs1, inputs2], outputs=outputs)\n if filename:\n model = load_model(filename)\n model.compile(loss='categorical_crossentropy', optimizer='adam')\n \n # summarize model\n # print(model.summary())\n # plot_model(model, to_file='model.png', show_shapes=True)\n return model\n\n# train dataset\n\n# load training dataset (6K)\nfilename = 'Flickr8k_text/Flickr_8k.trainImages.txt'\ntrain = load_set(filename)\nprint('Dataset: %d' % len(train))\n# descriptions\ntrain_descriptions = load_clean_descriptions('descriptions.txt', train)\nprint('Descriptions: train=%d' % len(train_descriptions))\n# photo features\ntrain_features = load_photo_features('features.pkl', train)\nprint('Photos: train=%d' % len(train_features))\n# prepare tokenizer\ntokenizer = create_tokenizer(train_descriptions)\nvocab_size = len(tokenizer.word_index)+1\nprint('Vocabulary Size: %d' % vocab_size)\n\n# determine max sequence 
length\nmax_length = max_length(train_descriptions)\nprint('Description Length: %d' % max_length)\n'''\n# prepare sequences\nX1train, X2train, ytrain = create_sequences(tokenizer, max_length, train_descriptions, train_features)\n'''\ndef data_generator(descriptions, tokenizer, max_length):\n directory = 'Flicker8k_Dataset'\n while True:\n for image_id in train:\n image = train_features[image_id]\n desc = descriptions[image_id]\n in_img, in_seq, out_word = create_sequences_single(tokenizer, max_length, desc, image)\n yield[[in_img, in_seq], out_word]\n# dev dataset\n\n# load test set\nfilename = 'Flickr8k_text/Flickr_8k.devImages.txt'\ntest = load_set(filename)\nprint('Dataset: %d' % len(test))\n# descriptions\ntest_descriptions = load_clean_descriptions('descriptions.txt', test)\nprint('Descriptions: test=%d' % len(test_descriptions))\n# photo features\ntest_features = load_photo_features('features.pkl', test)\nprint('Photos: test=%d' % len(test_features))\n# prepare sequences\nX1test, X2test, ytest = create_sequences(tokenizer, max_length, test_descriptions, test_features)\n\n# fit model\n\nmodel = define_model(vocab_size, max_length, 'model-ep010-loss3.900-val_loss3.950.h5')\n\nfilepath = 'model-ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n\n# fit model\nmodel.fit_generator(data_generator(train_descriptions, tokenizer, max_length), epochs=80, steps_per_epoch=1200, callbacks=[checkpoint], validation_data=([X1test, X2test], ytest))\n\n\n\n\n\n\n\n", "repo_name": "tyge318/Keras-LSTM-Exercise", "sub_path": "Captions/caption_train.py", "file_name": "caption_train.py", "file_ext": "py", "file_size_in_byte": 6722, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "pickle.load", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 102, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 106, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 107, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 108, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 110, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 112, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.layers.merge.add", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 122, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 124, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", 
"line_number": 184, "usage_type": "call"}]} {"seq_id": "14194124988", "text": "import os\nfrom os.path import join as pjoin\nimport json\nfilePath = './static/ocr/'\nfor i,j,k in os.walk(filePath):\n if i==\"./static/ocr/\" :\n continue\n else:\n path1 = i\n path2 = path1[12:]\n path3 = path1+path2\n jsonpath =path3+\".json\"\n jpgpath = path3+\".jpg\"\n fr = open(pjoin(jsonpath))\n model=json.load(fr)\n fr.close()\n string = {\"jpgpath\":jpgpath}\n\n for i in string:\n model[i] = string[i]\n jsObj = json.dumps(model)\n\n with open(pjoin(jsonpath), \"w\") as fw:\n fw.write(jsObj)\n fw.close()", "repo_name": "jingyoushui/Search", "sub_path": "pdfprocess.py", "file_name": "pdfprocess.py", "file_ext": "py", "file_size_in_byte": 628, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.walk", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "json.load", "line_number": 15, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}]} {"seq_id": "34432829666", "text": "import h5py\nimport numpy as np\nimport sys\n\n#my own toolkit\nimport HiCutils\nimport utils\nimport convert\n\n### YOU ARE SUPPOSED TO ONLY MODIFY VALUE HERE ###\n#input file\nbedfilename='/users/invites/carron/Documents/Boost-HiC/test_dataset/mouse10kb_fend.bed'\nmatrixfilename='/users/invites/carron/Documents/Boost-HiC/test_dataset/chr16ES_10000.matrix'\nOperation='Sample'\nrepositoryout='/run/media/carron/0ac0fffa-3350-431d-b1d1-865f8a21db21/data/Hi-C/Mouse/boyan/test/'\n\n#default parameter\nresolution=10000 #default : 10kb\nachr=\"chr16\"\nalpha=0.2 #AFTER a lot of test : 0.24 is always a good and safe compromise, you must use this value\n###\n\n\ndef BoostHiC(amat):\n\tnormmat=HiCutils.SCN(np.copy(amat))\n\tFFmat=np.power(HiCutils.fastFloyd(1/np.power(normmat.copy(),alpha)),-1/alpha) #to dist, FF, to contact in one line\n\tboostedmat=HiCutils.adjustPdS(normmat,FFmat)\n\treturn boostedmat\n\ndef Sample(amat,repositoryout):\n\tpercentofsample=[0.1,1.,10.]\n\tfor j in percentofsample:\n\t\tprint(\"Value of sample\",j)\n\t\tchrmat_s=np.copy(amat)\n\t\tchrmat=HiCutils.downsample_basic(chrmat_s,j)\n\t\tfh5 = h5py.File(repositoryout+\"inputmat_sampleat_\"+str(j)+\"_percent.hdf5\", \"w\")\n\t\tfh5['data'] = chrmat\n\t\tfh5.close()\n\n\n\n### CODE EXECUTION ###\n\n# load the data\nprint(\"LOADING MATRIX\")\nD=convert.loadabsdatafile(bedfilename)\nbeginfend=D[achr][0]\nendfend=D[achr][1]\nprint(\"Data fend :\",beginfend,endfend)\nbasemat=convert.loadmatrixselected(matrixfilename,beginfend,endfend)\n\n#matrix filtering\nprint(\"FILTERING\")\npos_out=HiCutils.get_outliers(basemat)\nbasematfilter=basemat[np.ix_(~pos_out, ~pos_out)]\nbasematfilter=np.copy(basematfilter)\n#basematfilter=basematfilter[0:1000,0:1000]\nprint(len(basemat),len(basematfilter))\nfh5 = h5py.File(repositoryout+\"inputmat.hdf5\", \"w\")\nfh5['data'] = basemat\nfh5.close()\nfh5 = h5py.File(repositoryout+\"inputmat_filtered.hdf5\", \"w\")\nfh5['data']=basematfilter\nfh5.close()\nutils.savematrixasfilelist3(pos_out,repositoryout+\"filteredbin.txt\")\n\nif Operation==\"Boost\":\n\tprint(\"Boost Hic\")\n\tboosted=BoostHiC(basematfilter)\n\t#save\n\tfh5 = h5py.File(repositoryout+\"boostedmat.hdf5\", \"w\")\n\tfh5['data']=boosted\n\tfh5.close()\nelif 
Operation==\"Sample\":\n\tprint(\"SAMPLING\")\n\tSample(basematfilter,repositoryout)\n\n\n\n\n", "repo_name": "LeopoldC/Boost-HiC", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2200, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "46", "api": [{"api_name": "HiCutils.SCN", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 26, "usage_type": "call"}, {"api_name": "HiCutils.fastFloyd", "line_number": 26, "usage_type": "call"}, {"api_name": "HiCutils.adjustPdS", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 34, "usage_type": "call"}, {"api_name": "HiCutils.downsample_basic", "line_number": 35, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 36, "usage_type": "call"}, {"api_name": "convert.loadabsdatafile", "line_number": 46, "usage_type": "call"}, {"api_name": "convert.loadmatrixselected", "line_number": 50, "usage_type": "call"}, {"api_name": "HiCutils.get_outliers", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.ix_", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 56, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 59, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 62, "usage_type": "call"}, {"api_name": "utils.savematrixasfilelist3", "line_number": 65, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 71, "usage_type": "call"}]} {"seq_id": "70936434701", "text": "import sqlite3\nimport re\nimport datetime\nimport random\ndef login_server(username=None,password=None):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select * from user where username=? 
and password=?',(username,password))\n data=cursor.fetchall()\n conn.close()\n if data:\n return data[0]\n else:\n return '账号或密码错误'\n\ndef register_server(**data):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n reg= re.compile(r'^1[34578][0-9]{9}$')\n if not reg.match(data['telephone'][0]):\n return \"请填写正确的手机号码\"\n try:\n cursor.execute('insert into user (username,password,createtime,name,\\\n telephone,QQ,email)values(?,?,?,?,?,?,?)',\\\n (data['username'][0],data['password'][0],\\\n datetime.date.today(),data['name'][0],\\\n data['telephone'][0],data['QQ'][0],data['email'][0]))\n except sqlite3.IntegrityError:\n conn.close()\n return \"用户名重复\"\n conn.commit()\n conn.close()\n return \"注册成功\"\n\ndef pre_server(**data):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n reg= re.compile(r'^1[34578][0-9]{9}$')\n if not reg.match(data['telephone'][0]):\n return \"请填写正确的手机号码\"\n try:\n cursor.execute('insert into pre (name,telephone,createtime,\\\n QQ,email)values(?,?,?,?,?)',\\\n (data['name'][0],data['telephone'][0],\\\n datetime.date.today(),data['QQ'][0],\\\n data['email'][0]))\n except sqlite3.IntegrityError:\n conn.close()\n return \"手机号码重复\"\n conn.commit()\n conn.close()\n return \"预报名成功\"\n\ndef score_server(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select real_score,virtual_score from user where id=?',(user_id,))\n data=cursor.fetchall()\n conn.close()\n return data[0]\n\ndef update_user_question(**data):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('insert into user_questions (user_id,question_id,result,\\\n choice) values(?,?,?,?)',\\\n (data['userId'][0],data['questionId'][0],\\\n data['answerResult'][0],data['userChoice'][0]))\n conn.commit()\n conn.close()\n\ndef get_user_question(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select question_id from user_questions where user_id=?',(user_id,))\n done=[x[0] for x in cursor.fetchall()]\n cursor.execute('select id from questions')\n questions= [x[0] for x in cursor.fetchall()]\n conn.close()\n undone=[]\n for q in questions:\n if q not in done:\n undone.append(q)\n r=random.choice(undone)\n return [r,get_question(r),len(undone)-1]\n\n\ndef get_question(question_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select title,A,B,C,D,explain,answer from questions where id=?',(question_id,))\n data,=cursor.fetchall()\n conn.close()\n return data\n\ndef score_detail(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select createtime,name,telephone,status,id\\\n from re where user_id=?',(user_id,))\n data=cursor.fetchall()\n conn.close()\n return data\n\ndef recommend_server(user_id,**data):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n reg= re.compile(r'^1[34578][0-9]{9}$')\n if not reg.match(data['re_telephone'][0]):\n return \"请填写正确的手机号码\"\n try:\n cursor.execute('insert into re (createtime,name,telephone,email,\\\n user_id)values(?,?,?,?,?)',\\\n (datetime.date.today(),data['re_name'][0],data['re_telephone'][0],\\\n data['re_email'][0],user_id))\n except sqlite3.IntegrityError:\n conn.close()\n return \"此人已被推荐\"\n conn.commit()\n cursor.execute('update user set virtual_score=virtual_score+150 where id=?',\\\n (user_id,))\n conn.commit()\n conn.close()\n return \"恭喜获得150个推荐积分!\"\n\ndef auth_server(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = 
conn.cursor()\n cursor.execute('select role from user where id=?',(user_id,))\n data=cursor.fetchall()\n conn.close()\n return data[0]\n\ndef all_user_s():\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select createtime,name,telephone,real_score,\\\n virtual_score,id from user where role=0')\n data=cursor.fetchall()\n conn.close()\n return data\n\ndef all_pre_s():\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select createtime,name,telephone,QQ,\\\n email from pre')\n data=cursor.fetchall()\n conn.close()\n return data\n\ndef zero_server(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('update user set virtual_score=0 ,real_score=0 where id=?',\\\n (user_id,))\n cursor.execute('delete from re where user_id=?',(user_id,))\n conn.commit()\n conn.close()\n\ndef get_user_by_id(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select name,telephone,QQ,email,virtual_score,\\\n real_score from user where id=?',\\\n (user_id,))\n data=cursor.fetchall()\n conn.close()\n return data[0]\n\ndef get_userid_by_re(re_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select user_id from re where id=?',(re_id,))\n data=cursor.fetchall()\n conn.close()\n return data[0]\n\ndef change_status_server(re_id,user_id,status,s):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('update re set status=? where id=?',(s,re_id))\n if status==1:\n cursor.execute('update user set virtual_score=virtual_score-50,\\\n real_score=real_score+50 where id=?',(user_id,))\n if status==2:\n cursor.execute('update user set virtual_score=virtual_score-100,\\\n real_score=real_score+100 where id=?',(user_id,))\n conn.commit()\n conn.close()\n\ndef search_users_by_phone(phone):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select createtime,name,telephone,id from user \\\n where telephone like ?',\\\n ('{}%'.format(phone),))\n data=cursor.fetchall()\n conn.close()\n return data\n\ndef search_res_by_phone(phone):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select createtime,name,telephone,status,id from re \\\n where telephone like ?',\\\n ('{}%'.format(phone),))\n data=cursor.fetchall()\n conn.close()\n return data\n", "repo_name": "luozx207/gdyxscore", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 7065, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "sqlite3.connect", "line_number": 6, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sqlite3.IntegrityError", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 36, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 45, "usage_type": "attribute"}, {"api_name": "sqlite3.IntegrityError", "line_number": 47, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 
55, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 63, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 73, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 84, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 89, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 97, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 106, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 114, "usage_type": "attribute"}, {"api_name": "sqlite3.IntegrityError", "line_number": 116, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 127, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 135, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 144, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 153, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 162, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 172, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 180, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 193, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 203, "usage_type": "call"}]} {"seq_id": "15474204709", "text": "from msilib.schema import Error\nfrom typing import Dict\nfrom flask import Flask, request\nfrom flask_cors import CORS\nfrom ObjectNotExistException import ObjectNotExistException\nimport jwt_utils\nfrom question import Question\nfrom answer import Answer\nfrom dbhelper import DBHelper\nimport bcrypt\napp = Flask(__name__)\nCORS(app)\n\nusername_mdp = {\"admin\": \"Vive l'ESIEE !\"}\n\n###\n# AUTHENTICATION\n###\n\ndef check_token(token):\n try:\n token = token.split()\n if token[0] == \"Bearer\":\n token = token[1]\n return token\n except Exception as e:\n return e\n\n@app.route('/login', methods=['POST'])\ndef login():\n try:\n payload = request.get_json()\n username = payload[\"username\"]\n \n dbHelper = DBHelper()\n \n new_user = False\n\n password_hash = dbHelper.get_player_password_hash(username)\n if password_hash is None:\n new_user = True\n else:\n password_hash = password_hash.encode()\n password = payload[\"password\"]\n \n if new_user:\n password_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt())\n dbHelper.add_player(username, password_hash.decode())\n\n if bcrypt.checkpw(password.encode(), password_hash):\n token = jwt_utils.build_token(username)\n return {\"token\": token}, 200\n except Exception as e:\n return '', 401\n return '', 401\n\n@app.route('/is-logged/', methods=['GET'])\ndef is_logged(username):\n if username == 'null':\n return {\"isLogged\": False}, 401\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n except AttributeError as e:\n return {\"isLogged\": False}, 401\n try:\n # check if the token is valid\n if jwt_utils.decode_token(token) == username:\n return {\"isLogged\": True}, 200\n else:\n return {\"isLogged\": False}, 401\n except jwt_utils.JwtError as e:\n return {\"isLogged\": False}, 401\n except Exception as e:\n return '', 401\n\n\n###\n# QUESTIONS\n###\n\n@app.route('/questions', methods=['POST'])\ndef add_question():\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n # check if the token is valid\n payload 
= request.get_json()\n        if jwt_utils.decode_token(token) == \"admin\":\n\n            question = Question(\n                payload['title'], payload['text'], payload['image'], payload['position'])\n\n            dbHelper = DBHelper()\n            dbHelper.insert_question(question, payload['possibleAnswers'])\n            return '', 200\n        else:\n            return '', 401\n    except jwt_utils.JwtError as e:\n        return e.message, 401\n\n@app.route('/questions/<position>', methods=['GET'])\ndef get_question(position):\n    try:\n        dbHelper = DBHelper()\n        question = dbHelper.get_question(position)\n\n        if question is None:\n            return '', 404\n\n        ret = question.convertToJson()\n        return ret, 200\n\n    except ObjectNotExistException as e_custom:\n        return e_custom.message, 404\n    except Exception as e_base:\n        return e_base.message, 404\n\n@app.route('/questions', methods=['GET'])\ndef get_questions():\n    try:\n        dbHelper = DBHelper()\n        questions = dbHelper.get_questions()\n\n        if questions is None:\n            return '', 404\n        return {\"questions\": questions}, 200\n\n    except ObjectNotExistException as e_custom:\n        return e_custom.message, 404\n    except Exception as e_base:\n        return e_base.message, 404\n\n@app.route('/questions/<position>', methods=['DELETE'])\ndef delete_question(position):\n\n    try:\n        token = request.headers.get('Authorization')\n        token = check_token(token)\n        if jwt_utils.decode_token(token) == \"admin\":\n\n            dbHelper = DBHelper()\n            dbHelper.delete_question(int(position))\n            return '', 204\n        else:\n            return '', 401\n    except jwt_utils.JwtError as e:\n        return e.message, 401\n    except ObjectNotExistException as e_custom:\n        return e_custom.message, 404\n    except Exception as e_base:\n        return e_base.message, 404\n\n@app.route('/questions/<position>', methods=['PUT'])\ndef update_question(position):\n    try:\n        token = request.headers.get('Authorization')\n        token = check_token(token)\n        # check if the token is valid\n        if jwt_utils.decode_token(token) == \"admin\":\n\n            payload = request.get_json()\n            new_position = int(payload['position'])\n            question = Question(\n                payload['title'], payload['text'], payload['image'], int(position))\n            dbHelper = DBHelper()\n\n            dbHelper.update_question(\n                new_position, question, payload['possibleAnswers'])\n\n            return '', 200\n        else:\n            return '', 401\n    except ObjectNotExistException as e_custom:\n        return e_custom.message, 404\n    except jwt_utils.JwtError as e:\n        return e.message, 401\n\n\n###\n# PARTICIPATIONS\n###\n\n@app.route('/participations', methods=['POST'])\ndef set_participation():\n    try:\n        token = request.headers.get('Authorization')\n        token = check_token(token)\n        payload = request.get_json()\n        username = payload['username']\n        answersId = payload['answers']\n        # check if the token is valid\n\n        if jwt_utils.decode_token(token) == username:\n\n            dbHelper = DBHelper()\n\n            question_count = dbHelper.get_question_count()\n\n            if (question_count != len(answersId)):\n                return \"Bad request\", 400\n\n            correct_participation = dbHelper.get_correct_participation()\n            if correct_participation is None:\n                return \"Bad request\", 400\n            \n            score = 0\n\n            for i in range(question_count):\n                if correct_participation[i] == answersId[i]:\n                    score += 1\n\n            dbHelper.set_score(username, score)\n\n            result = {\"username\": username, \"score\": score}\n\n            return result, 200\n        else:\n            return '', 401\n    except jwt_utils.JwtError as e:\n        return e.message, 401\n\n@app.route('/participations', methods=['DELETE'])\ndef delete_participations():\n\n    try:\n        token = request.headers.get('Authorization')\n        token = check_token(token)\n        if jwt_utils.decode_token(token) == \"admin\":\n\n            dbHelper = DBHelper()\n            
dbHelper.delete_participations()\n return 'ok deleted', 204\n else:\n return '', 401\n\n except jwt_utils.JwtError as e:\n return e.message, 401\n except Exception as e:\n return e.message, 401\n\n\n###\n# GET INFO\n###\n\n@app.route('/quiz-info', methods=['GET'])\ndef get_quiz_info():\n try:\n dbHelper = DBHelper()\n scores = dbHelper.get_players_score()\n numberQuestions = dbHelper.get_question_count()\n\n return {\"size\": numberQuestions, \"scores\": scores}, 200\n except Exception as e:\n return e.message, 401\n\n@app.route('/questions-count', methods=['GET'])\ndef get_question_count():\n try:\n dbHelper = DBHelper()\n count = dbHelper.get_question_count()\n return {\"count\": count}, 200\n except Exception as e:\n return e.message, 401\n\n@app.route('/get-last-score/', methods=['GET'])\ndef get_last_score(username):\n\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n if jwt_utils.decode_token(token) == username:\n dbHelper = DBHelper()\n score = dbHelper.get_last_score(username)\n if score == -1:\n return '', 404\n return {\"score\": score}, 200\n else:\n return '', 401\n except jwt_utils.JwtError as e:\n return e.message, 401\n except Exception as e:\n return e.message, 401\n\n@app.route('/get-best-score/', methods=['GET'])\ndef get_best_score(username):\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n if jwt_utils.decode_token(token) == username:\n dbHelper = DBHelper()\n score = dbHelper.get_best_score(username)\n if score == -1:\n return '', 404\n return {\"score\": score}, 200\n else:\n return '', 401\n except jwt_utils.JwtError as e:\n return e.message, 401\n except Exception as e:\n return e.message, 401\n\n@app.route('/questions//answers', methods=['GET'])\ndef get_answer(position):\n try:\n dbHelper = DBHelper()\n answers = dbHelper.get_answer(position)\n if answers is None:\n return '', 404\n \n for key in answers:\n answers[key].pop('questionID', None)\n answers[key].pop('isCorrect', None)\n return {\"answers\", answers}, 200\n except Exception as e:\n return e.message, 401\n \nif __name__ == \"__main__\":\n app.run()\n", "repo_name": "Sangerwan/quiz-app", "sub_path": "quiz-api/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 9050, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "dbhelper.DBHelper", "line_number": 35, "usage_type": "call"}, {"api_name": "bcrypt.hashpw", "line_number": 47, "usage_type": "call"}, {"api_name": "bcrypt.gensalt", "line_number": 47, "usage_type": "call"}, {"api_name": "bcrypt.checkpw", "line_number": 50, "usage_type": "call"}, {"api_name": "jwt_utils.build_token", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "jwt_utils.decode_token", "line_number": 68, "usage_type": "call"}, {"api_name": "jwt_utils.JwtError", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request.headers.get", "line_number": 85, "usage_type": 
"call"}, {"api_name": "flask.request.headers", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "jwt_utils.decode_token", "line_number": 89, "usage_type": "call"}, {"api_name": "question.Question", "line_number": 91, "usage_type": "call"}, {"api_name": "dbhelper.DBHelper", "line_number": 94, "usage_type": "call"}, {"api_name": "jwt_utils.JwtError", "line_number": 99, "usage_type": "attribute"}, {"api_name": "dbhelper.DBHelper", "line_number": 105, "usage_type": "call"}, {"api_name": "question.convertToJson", "line_number": 111, "usage_type": "call"}, {"api_name": "ObjectNotExistException.ObjectNotExistException", "line_number": 114, "usage_type": "name"}, {"api_name": "dbhelper.DBHelper", "line_number": 122, "usage_type": "call"}, {"api_name": "ObjectNotExistException.ObjectNotExistException", "line_number": 129, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 138, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 138, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 138, "usage_type": "name"}, {"api_name": "jwt_utils.decode_token", "line_number": 140, "usage_type": "call"}, {"api_name": "dbhelper.DBHelper", "line_number": 142, "usage_type": "call"}, {"api_name": "jwt_utils.JwtError", "line_number": 147, "usage_type": "attribute"}, {"api_name": "ObjectNotExistException.ObjectNotExistException", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 157, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 157, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 157, "usage_type": "name"}, {"api_name": "jwt_utils.decode_token", "line_number": 160, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 162, "usage_type": "name"}, {"api_name": "question.Question", "line_number": 164, "usage_type": "call"}, {"api_name": "dbhelper.DBHelper", "line_number": 166, "usage_type": "call"}, {"api_name": "ObjectNotExistException.ObjectNotExistException", "line_number": 174, "usage_type": "name"}, {"api_name": "jwt_utils.JwtError", "line_number": 176, "usage_type": "attribute"}, {"api_name": "flask.request.headers.get", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 187, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 187, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 189, "usage_type": "name"}, {"api_name": "jwt_utils.decode_token", "line_number": 194, "usage_type": "call"}, {"api_name": "dbhelper.DBHelper", "line_number": 196, "usage_type": "call"}, {"api_name": "jwt_utils.JwtError", "line_number": 220, "usage_type": "attribute"}, {"api_name": "flask.request.headers.get", "line_number": 227, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 227, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 227, "usage_type": "name"}, {"api_name": "jwt_utils.decode_token", "line_number": 229, "usage_type": "call"}, {"api_name": "dbhelper.DBHelper", "line_number": 231, "usage_type": "call"}, 
{"api_name": "jwt_utils.JwtError", "line_number": 237, "usage_type": "attribute"}, {"api_name": "dbhelper.DBHelper", "line_number": 250, "usage_type": "call"}, {"api_name": "dbhelper.DBHelper", "line_number": 261, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 271, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 271, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 271, "usage_type": "name"}, {"api_name": "jwt_utils.decode_token", "line_number": 273, "usage_type": "call"}, {"api_name": "dbhelper.DBHelper", "line_number": 274, "usage_type": "call"}, {"api_name": "jwt_utils.JwtError", "line_number": 281, "usage_type": "attribute"}, {"api_name": "flask.request.headers.get", "line_number": 289, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 289, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 289, "usage_type": "name"}, {"api_name": "jwt_utils.decode_token", "line_number": 291, "usage_type": "call"}, {"api_name": "dbhelper.DBHelper", "line_number": 292, "usage_type": "call"}, {"api_name": "jwt_utils.JwtError", "line_number": 299, "usage_type": "attribute"}, {"api_name": "dbhelper.DBHelper", "line_number": 307, "usage_type": "call"}]} {"seq_id": "12561448500", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n# Python modules\nimport datetime\nimport time\nimport configparser as ConfigParser\nimport json\nimport uuid\nimport zmq\n\n# Added modules\nimport pymongo\nfrom pymongo import MongoClient\nfrom pymongo import errors as PyError\n# Own Modules\n\nfrom logger import logger\nlogger = logger('server_init', stream_level='INFO')\n\n\nclass InitProcess(object):\n\n def __init__(self, config_file, restart):\n \"\"\" \"\"\"\n self.config_file = config_file\n self.dbs_names = ['process_profile',\n 'current_profile',\n 'stored_profile',\n 'process_link',\n 'current_link',\n 'stored_link',\n 'change_link',\n 'context_link',\n 'context',\n 'activity',\n 'process_tweet',\n 'tweet_info',\n 'stored_tweet',\n 'rand_lvl2',\n 'rand_lvl3',\n 'full_link']\n\n self.restart = restart.lower()\n\n def init_values(self):\n \"\"\" \"\"\"\n self.read_config(self.config_file)\n self.dbs = self.build_dbs(self.address_db, self.db_name)\n logger.info(self.dbs)\n\n self.loop_interval = int(self.loop_interval)\n self.time_lvl2 = self.get_time_lvl2()\n if self.restart == 'false':\n self.build_index(**self.dbs)\n self.twitter_key = self.get_keys(self.twitter_file)\n self.client_id = self.get_clients()\n lvl1_list = self.get_lvl1(self.lvl1_file)\n self.set_lvl1 = set([int(i) for i in lvl1_list])\n self.loop_number = 1\n self.nbr_client = set()\n self.record_lvl1(lvl1_list)\n # self.max_limit = Limit(self.loop_interval, len(self.client_id)).calculing_limit()\n self.start_time = datetime.datetime.now()\n # self.write_start()\n # self.stop_time = self.get_stop_time()\n # These values are set up in the config file\n elif self.restart == 'true':\n self.client_id = self.get_clients()\n lvl1_list = self.get_lvl1(self.lvl1_file)\n self.set_lvl1 = set([int(i) for i in lvl1_list])\n self.get_restart()\n\n else:\n raise('Need to enter \"true\" or \"false\"')\n\n return {'loop_interval': self.loop_interval,\n 'databases': self.dbs,\n 'time_lvl2': self.time_lvl2,\n 'set_lvl1': self.set_lvl1,\n 'loop_number': self.loop_number,\n 'nbr_client': self.client_id}\n\n def get_restart(self):\n \"\"\" function to get the values from the db to restart \"\"\"\n # self.nbr_client = set()\n # self.set_lvl1 = 
set()\n loops = set()\n logger.info('Doing Profile for loop')\n for profile in self.dbs['process_profile'].find({}, {'loop_number': True, '_id': False}):\n # self.nbr_client.add(profile.pop('profile_client_id', None))\n loop_number = profile['loop_number']\n if loop_number:\n loops.add(loop_number)\n if len(loops) == 2:\n logger.info('Get two loops from profile: {}'.format(loops))\n break\n logger.info('Doing Links for loop')\n for links in self.dbs['process_link'].find({}, {'loop_number': True, '_id': False}):\n # self.nbr_client.add(profile.pop('profile_client_id', None))\n loop_number = links['loop_number']\n if loop_number:\n loops.add(loop_number)\n if len(loops) == 2:\n logger.info('Get two loops from links: {}'.format(loops))\n break\n logger.info('Doing rand_3 for loop')\n for links in self.dbs['rand_lvl3'].find({}, {'loop_number': True, '_id': False}):\n # self.nbr_client.add(profile.pop('profile_client_id', None))\n loop_number = links['loop_number'] - 1\n if loop_number:\n loops.add(loop_number)\n if len(loops) == 2:\n logger.info('Get two loops from rand_lvl3: {}'.format(loops))\n break\n logger.info('Doing rand_2 for loop')\n for links in self.dbs['rand_lvl2'].find({}, {'loop_number': True, '_id': False}):\n # self.nbr_client.add(profile.pop('profile_client_id', None))\n loop_number = links['loop_number'] - 1\n if loop_number:\n loops.add(loop_number)\n if len(loops) == 2:\n logger.info('Get two loops from rand_lvl2: {}'.format(loops))\n break\n # self.set_lvl1.add(profile.pop('id_str', None))\n self.loop_number = min(int(s) for s in loops)\n logger.info('loop_number: {}'.format(self.loop_number))\n\n def write_start(self):\n \"\"\" \"\"\"\n json.dump({'loop_interval': self.loop_interval,\n 'client_id': self.client_id,\n 'time_lvl2': self.time_lvl2,\n 'databases': str(self.dbs),\n 'start_time': str(self.start_time),\n 'lvl1': self.set_lvl1}, open('start_params.txt', 'w'))\n\n def read_config(self, config_file):\n Config = ConfigParser.ConfigParser()\n Config.read(config_file)\n for section in Config.sections():\n for option in Config.options(section):\n setattr(self, option, Config.get(section, option))\n\n def build_dbs(self, address, db_name):\n \"\" \"\"\n if address is None:\n c = MongoClient()\n else:\n c = MongoClient(address)\n db = c[db_name]\n return {k: db[k] for k in self.dbs_names}\n\n def build_index(self, **kwargs):\n \"\"\" \"\"\"\n try:\n kwargs['process_profile'].create_index([('id_str', pymongo.DESCENDING),\n ('loop_number', pymongo.ASCENDING)],\n unique=True)\n kwargs['process_profile'].create_index('extra')\n kwargs['process_profile'].create_index('doing')\n\n kwargs['stored_profile'].create_index('id_str', unique=True)\n\n kwargs['process_link'].create_index([('id_str', pymongo.ASCENDING),\n ('type_link', pymongo.DESCENDING)],\n unique=True)\n kwargs['full_link'].create_index([('id_str', pymongo.ASCENDING),\n ('type_link', pymongo.DESCENDING),\n ('loop_number', pymongo.ASCENDING)],\n unique=True)\n\n kwargs['process_tweet'].create_index('id_str', unique=True)\n kwargs['process_tweet'].create_index('processing')\n\n kwargs['stored_link'].create_index('type_link')\n kwargs['stored_link'].create_index('id_str')\n\n kwargs['context_link'].create_index('loop_number')\n kwargs['context_link'].create_index('loop_number')\n\n kwargs['activity'].create_index('id_str')\n kwargs['activity'].create_index('loop_number')\n\n kwargs['context'].create_index('id_str')\n kwargs['stored_tweet'].create_index('id_str', unique=True)\n kwargs['tweet_info'].create_index('id_str', 
unique=True)\n kwargs['rand_lvl2'].create_index('id_str', unique=True)\n kwargs['rand_lvl3'].create_index('id_str', unique=True)\n\n except PyError.ServerSelectionTimeoutError:\n raise('Error in DBS connection, check if MongoDB is alive')\n\n def get_keys(self, twitter_file):\n \"\"\" \"\"\"\n keydict = {}\n with open(twitter_file, 'r') as f:\n for line in f:\n key, val = line.split(':')\n keydict[key] = val[:-1]\n return keydict\n\n def get_clients(self):\n \"\"\" \"\"\"\n client_id = list()\n\n context_status = zmq.Context()\n timeout_start = time.time()\n\n status_sock = context_status.socket(zmq.REP)\n status_sock.setsockopt(zmq.RCVTIMEO, 2000)\n # status_sock.setsockopt(zmq.RCVTIMEO, 2000)\n status_sock.bind(\"tcp://0.0.0.0:{}\".format(self.status_port))\n while time.time() < (timeout_start + 10):\n try:\n client_to_connect = status_sock.recv()\n if client_to_connect.decode() == 'id_request':\n _id = str(uuid.uuid4())\n data = {'client_id': _id, 'dbs_names': self.dbs_names}\n status_sock.send_json(data)\n client_id.append(_id)\n else:\n status_sock.send('too early'.encode('utf-8'))\n except zmq.error.Again:\n pass\n # time.sleep(2)\n\n logger.info('Get {} clients'.format(len(client_id)))\n logger.info('Close the socket')\n status_sock.close()\n context_status.term()\n logger.info('Socket closed')\n return client_id\n\n def get_time_lvl2(self):\n \"\"\"docstring for time_restrain\"\"\"\n try:\n if int(self.time_lvl2) != 1:\n if int(self.time_lvl2) < int(self.loop_interval):\n value = int(self.loop_interval) *4\n else:\n value = int(self.loop_interval)* int(self.time_lvl2)\n else:\n value = int(self.time_lvl2)\n except ValueError: # In case of None or string or anything\n # not a number\n value = int(self.loop_interval)* 4\n return value\n\n def get_lvl1(self, lvl1_file):\n logger.info('Getting the lvl1 file - {}'.format(lvl1_file))\n with open(lvl1_file, 'r') as f:\n return [line[:-1] for line in f]\n\n def record_lvl1(self, lvl1_list):\n \"\"\" \"\"\"\n for user in lvl1_list:\n info_user = dict()\n info_user['id_str'] = int(user)\n info_user['loop_number'] = 1\n\n self.dbs['process_profile'].insert_one(info_user)\n self.dbs['process_tweet'].insert_one(info_user)\n info_user['type_link'] = 'followers'\n self.dbs['process_link'].insert_one(info_user)\n info_user['type_link'] = 'friends'\n try:\n del info_user['_id']\n except KeyError:\n pass\n self.dbs['process_link'].insert_one(info_user)\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "Oliph/PhD-WebScience", "sub_path": "server_init.py", "file_name": "server_init.py", "file_ext": "py", "file_size_in_byte": 10699, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "logger.logger", "line_number": 19, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 50, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 50, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "attribute"}, {"api_name": "logger.logger.info", "line_number": 89, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 89, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 96, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 96, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 98, "usage_type": "call"}, {"api_name": "logger.logger", 
"line_number": 98, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 105, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 105, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 107, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 107, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 114, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 114, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 116, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 116, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 123, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 123, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 127, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 127, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 131, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 139, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 148, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 150, "usage_type": "call"}, {"api_name": "pymongo.DESCENDING", "line_number": 157, "usage_type": "attribute"}, {"api_name": "pymongo.ASCENDING", "line_number": 158, "usage_type": "attribute"}, {"api_name": "pymongo.ASCENDING", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pymongo.DESCENDING", "line_number": 166, "usage_type": "attribute"}, {"api_name": "pymongo.ASCENDING", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pymongo.DESCENDING", "line_number": 169, "usage_type": "attribute"}, {"api_name": "pymongo.ASCENDING", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pymongo.errors.ServerSelectionTimeoutError", "line_number": 191, "usage_type": "attribute"}, {"api_name": "pymongo.errors", "line_number": 191, "usage_type": "name"}, {"api_name": "zmq.Context", "line_number": 207, "usage_type": "call"}, {"api_name": "time.time", "line_number": 208, "usage_type": "call"}, {"api_name": "zmq.REP", "line_number": 210, "usage_type": "attribute"}, {"api_name": "zmq.RCVTIMEO", "line_number": 211, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 214, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 218, "usage_type": "call"}, {"api_name": "zmq.error", "line_number": 224, "usage_type": "attribute"}, {"api_name": "logger.logger.info", "line_number": 228, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 228, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 229, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 229, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 232, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 232, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 251, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 251, "usage_type": "name"}]} {"seq_id": "11303693181", "text": "import os, sys\nimport logging\nimport multiprocessing as mp\n\nfrom argparse import ArgumentParser\nfrom collections import OrderedDict\n\nsys.path.insert(0, os.path.dirname(os.getcwd()))\nfrom Core.CoreSystem import Helper\n\n\nclass clsParameters():\n\n def __init__(self, options):\n self.strUser = options.user_name\n self.strProject = options.project_name.replace('.txt', '') ## A user 
can be confused the input. So I prevented from it using 'replace'.\n self.strGroup = options.group\n self.intCore = options.thread\n\n self.strSampleList = 'User/{user}/{project}.txt'.format(user=options.user_name, project=options.project_name)\n\n\ndef SummaryRandomBarcode(sFile_path):\n\n \"\"\"\n /Tmp\n 190819_Nahye_24k_2_D0_2-24kLib_Classified_Indel_barcode.fastq* -> process target\n 190819_Nahye_24k_2_D0_2-24kLib_Indel_freq.txt*\n 190819_Nahye_24k_2_D0_2-24kLib_Indel_summary.txt*\n 190819_Nahye_24k_2_D0_2-24kLib_Summary.txt*\n Pickle\n\n dBarcode_cnt = {'ACGTACTC_sorting_barcode': {'ACATACAC_random': 5, 'CGTGTTGA_random': 3, ...}\n \"\"\"\n dictBarcodeCnt = {}\n strClassCheck = ''\n\n strSample = sFile_path.split('/')[-1]\n logging.info('Summary_random_barcode start : %s, %s' % (sFile_path, strSample))\n\n for sFile in os.listdir(sFile_path+'/Tmp/'):\n if '.fastq' in sFile:\n with open(sFile_path+'/Tmp/'+sFile) as Input:\n for i, strRow in enumerate(Input):\n\n # @D00235:683:CE1P6ANXX:6:1114:2135:5231 1:N:0:CTGAAGCT+CCTATCCT:Barcode_TTTGCTATCTCGACGTATGGACAGTG:total\n if i % 4 == 0:\n listBarClass = strRow.replace('\\n','').split('Barcode_')[1].split(':')\n strBarcode = listBarClass[0]\n strClass = listBarClass[1]\n\n if strClass == 'total':\n strClassCheck = 'total'\n\n if i % 4 == 1 and strClassCheck == 'total':\n strRow = strRow.replace('\\n','').upper()\n intBarcodeStart = strRow.find(strBarcode)\n strRandom_barcode = strRow[intBarcodeStart-8:intBarcodeStart]\n\n try:\n _ = dictBarcodeCnt[strBarcode]\n except KeyError:\n dictBarcodeCnt[strBarcode] = {}\n try:\n dictBarcodeCnt[strBarcode][strRandom_barcode] += 1\n except KeyError:\n dictBarcodeCnt[strBarcode][strRandom_barcode] = 1\n #print(sBarcode, sRandom_barcode, iBarcode_start, sRow)\n\n strClassCheck = ''\n\n if not os.path.isdir(sFile_path + '/Summary_Random_barcode'): os.mkdir(sFile_path + '/Summary_Random_barcode')\n with open(sFile_path + '/Summary_Random_barcode/%s_all_random_barcode.txt' % strSample, 'w') as All_random,\\\n open(sFile_path + '/Summary_Random_barcode/%s_Unique_RandomBarcodeNumber_In_SortingBarcode.txt' % strSample, 'w') as Random_sorting:\n\n All_random.write('Sorting_barcode\\tUnique_RandomBarcodeNumber_In_SortingBarcode\\tRandomBarcode\\tEach_RandomBarcode_read_count\\n')\n Random_sorting.write('Sorting_barcode\\tUnique_RandomBarcodeNumber_In_SortingBarcode\\n')\n\n for sBarcode, dRandom_barcode_cnt in dictBarcodeCnt.items():\n iRandom_barcode_num = len(dRandom_barcode_cnt.keys())\n Random_sorting.write('\\t'.join(map(str, [sBarcode, iRandom_barcode_num]))+'\\n')\n\n for sRandom_barcode, iCnt in dRandom_barcode_cnt.items():\n All_random.write('\\t'.join(map(str, [sBarcode, iRandom_barcode_num, sRandom_barcode, iCnt]))+'\\n')\n\n logging.info('Summary_random_barcode end: %s' % sFile_path)\n\n## on going\ndef CountGroup(InstParameters):\n \"\"\"\n Sorting_barcode Unique_RandomBarcodeNumber_In_SortingBarcode RandomBarcode Each_RandomBarcode_read_count\n TATATCATAGCGTACTCATC 8 TGCGTTTG 3\n TATATCATAGCGTACTCATC 8 CGCGTTTG 3\n TATATCATAGCGTACTCATC 8 TAGTTTTG 1\n TATATCATAGCGTACTCATC 8 ATAGTTTG 1\n \"\"\"\n\n sHeader = ''\n\n with open(InstParameters.strSampleList) as Sample: ## tmp input\n\n listSample = Sample.readlines()\n\n setGroup = set([strRow.replace('\\n', '').split('\\t')[2].upper() for strRow in listSample])\n\n for strGroup in setGroup:\n if strGroup == 'CTRL': continue\n\n for strRow in listSample:\n if strGroup == strGroupOfSample: ## matched group names -> Sum the counts\n listCol = 
strRow.replace('\\n', '').split('\\t')\n strSample = listCol[0]\n strRef = listCol[1]\n strGroupOfSample = listCol[2]\n\n strProjectDir = './Output/{user}/{project}'.format(user=InstParameters.strUser,\n project=InstParameters.strProject)\n strGroupDir = os.path.join(strProjectDir, 'Group_result')\n Helper.MakeFolderIfNot(strGroupDir)\n\n dTotal_RandomBarcode_cnt_in_SortingBarcode = OrderedDict() ## ('GECKO_6367_GATCTGCTC', ['GECKO_6367', 'GATCTGCTC', 2, 156, '0.0128']),\n ## Unique key, only one list.\n\n with open('{project_dir}/{sample}_all_random_barcode.txt'.format(project_dir=strProjectDir,\n sample=strSample)) as RandomBarcode_SeqFreq:\n sHeader = RandomBarcode_SeqFreq.readline()\n\n for sRow in RandomBarcode_SeqFreq:\n lCol = sRow.replace('\\n', '').split('\\t')\n\n sSortingBarcode = lCol[0]\n #iTotal_RandomBarcode_cnt_in_SortingBarcode = int(lCol[1])\n sSorting_and_Random_barcode_seq = lCol[0] + '_' + lCol[2] ## Unique name : Doench2014_1000_CTCTGGGGT\n iRandomBarcode_count = int(lCol[3])\n\n lCol[3] = iRandomBarcode_count\n\n try:\n _ = dTotal_RandomBarcode_cnt_in_SortingBarcode[sSorting_and_Random_barcode_seq]\n\n dTotal_RandomBarcode_cnt_in_SortingBarcode[sSorting_and_Random_barcode_seq][3] += iRandomBarcode_count\n\n except KeyError:\n dTotal_RandomBarcode_cnt_in_SortingBarcode[sSorting_and_Random_barcode_seq] = lCol ## initial assignment\n #END for\n dRecal_total_kind_of_RandomBarcode = OrderedDict()\n for sSort_Rand_seq in dTotal_RandomBarcode_cnt_in_SortingBarcode: ## sSorting_and_Random_barcode_seq\n sSortBarcode = sSort_Rand_seq.split('_')[0]\n try:\n dRecal_total_kind_of_RandomBarcode[sSortBarcode].append(dTotal_RandomBarcode_cnt_in_SortingBarcode[sSort_Rand_seq])\n except KeyError:\n dRecal_total_kind_of_RandomBarcode[sSortBarcode] = [dTotal_RandomBarcode_cnt_in_SortingBarcode[sSort_Rand_seq]]\n\n for sKey, llValue in dRecal_total_kind_of_RandomBarcode.items():\n ## sKey: TATATCATAGCGTACTCATC, llValue : [[TATATCATAGCGTACTCATC, 8, TGCGTTTG, 3],[],[] ...\n iKind_of_RandomBarcode = len(llValue) ################## why do I make like this ?????\n for lValue in llValue:\n lValue[1] = iKind_of_RandomBarcode ## Recal using group total cnt.\n\n llValue = sorted(llValue, key=lambda x:x[3], reverse=True)\n dRecal_total_kind_of_RandomBarcode[sKey] = llValue\n\n strEachGroup = './Output/Group_result/%s' % strGroup\n Helper.MakeFolderIfNot(strEachGroup)\n\n with open(os.path.join(strEachGroup, 'Summary_all_random_barcode_in_group.txt'), 'w') as Sort_Random_cnt,\\\n open(os.path.join(strEachGroup, 'Summary_Unique_RandomBarcodeNumber_in_group.txt'), 'w') as Uniq_random_cnt:\n\n Sort_Random_cnt.write(sHeader)\n Uniq_random_cnt.write('Sorting_barcode\\tUnique_RandomBarcodeNumber_In_SortingBarcode\\n')\n\n for sSortBarcode, llCol in dRecal_total_kind_of_RandomBarcode.items():\n Uniq_random_cnt.write('\\t'.join(map(str, [sSortBarcode, len(llCol)]))+'\\n')\n for lCol in llCol:\n Sort_Random_cnt.write('\\t'.join(map(str, lCol))+'\\n')\n #END: for\n #END: with\n\n\ndef Main():\n\n logging.info('Program Start')\n logging.info('Make commands for a multiple processing')\n\n parser = ArgumentParser(description='Script for counting the random barcodes')\n\n parser.add_argument('-u', '--user_name', type=str, dest='user_name', help='The user name in the /user subdir')\n parser.add_argument('-p', '--project_name', type=str, dest='project_name', help='The project name in the /user/user_name/ subdir')\n parser.add_argument('-g', '--group', type=str, dest='group', default='false', help='The group sum 
run of the barcodes, default: false')\n parser.add_argument('-t', '--thread', type=int, dest='thread', default='15', help='The multicore number 1~15')\n options = parser.parse_args()\n\n InstParameters = clsParameters(options)\n\n lPara = []\n\n with open(InstParameters.strSampleList) as SampleList:\n\n for strSample in SampleList:\n if strSample[0] == '#' or strSample[0] in ['', ' ', '\\r', '\\n', '\\r\\n']: continue\n strSample = strSample.replace('\\n', '').replace('\\r', '').split('\\t')[0]\n sFile_path = './Output/{user}/{project}/{sample}'.format(user=options.user_name,\n project=options.project_name,\n sample=strSample)\n #print('sFile_path', sFile_path)\n lPara.append(sFile_path)\n\n ## single_test\n #Summary_random_barcode(lPara[0])\n\n logging.info('Multiple processing Start')\n p = mp.Pool(options.thread)\n p.map_async(SummaryRandomBarcode, lPara).get()\n logging.info('Multiple processing End')\n\n #logging.info('Count group Start')\n #CountGroup(InstParameters)\n #logging.info('Count group End')\n\n #logging.info('Program End')\n\nMain()\n", "repo_name": "CRISPRJWCHOI/CRISPR_toolkit", "sub_path": "Indel_searcher_2/Summary_Random_barcode.py", "file_name": "Summary_Random_barcode.py", "file_ext": "py", "file_size_in_byte": 10848, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "46", "api": [{"api_name": "sys.path.insert", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 39, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 72, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "Core.CoreSystem.Helper.MakeFolderIfNot", "line_number": 119, "usage_type": "call"}, {"api_name": "Core.CoreSystem.Helper", "line_number": 119, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 121, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 146, "usage_type": "call"}, {"api_name": "Core.CoreSystem.Helper.MakeFolderIfNot", "line_number": 164, "usage_type": "call"}, {"api_name": "Core.CoreSystem.Helper", "line_number": 164, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 182, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 183, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 185, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 211, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 212, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 214, "usage_type": 
"call"}]} {"seq_id": "23472306307", "text": "import logging\nlogger = logging.getLogger(__name__)\n\nimport os\nimport re\nimport yaml\nimport json\nfrom comment_parser import comment_parser\n\n\nclass Parser():\n def __init__(self, threatmodel):\n\n self.threatmodel = threatmodel\n\n self.action_table = {}\n self.action_table[\"mitigate\"] = self.threatmodel.add_mitigation\n self.action_table[\"accept\"] = self.threatmodel.add_acceptance\n self.action_table[\"transfer\"] = self.threatmodel.add_transfer\n self.action_table[\"expose\"] = self.threatmodel.add_exposure\n self.action_table[\"connect\"] = self.threatmodel.add_connection\n self.action_table[\"review\"] = self.threatmodel.add_review\n self.action_table[\"test\"] = self.threatmodel.add_test\n self.action_table[\"threat\"] = self.threatmodel.add_threat\n self.action_table[\"control\"] = self.threatmodel.add_control\n self.action_table[\"component\"] = self.threatmodel.add_component\n\n self.patterns = {}\n self.patterns[\"mitigate\"] = r'@mitigates? (?P.*?) against (?P.*?) with (?P.*)'\n self.patterns[\"accept\"] = r'@accepts? (?P.*?) to (?P.*?) with (?P
.*)'\n self.patterns[\"transfer\"] = r'@transfers? (?P.*?) from (?P.*?) to (?P.*?) with (?P
.*)'\n self.patterns[\"expose\"] = r'@exposes? (?P.*?) to (?P.*?) with (?P
.*)'\n self.patterns[\"connect\"] = r'@connects? (?P.*?) (?Pwith|to) (?P.*?) with (?P
.*)'\n self.patterns[\"review\"] = r'@reviews? (?P.*?) (?P
.*)'\n self.patterns[\"test\"] = r'@tests? (?P.*?) for (?P.*)'\n\n self.patterns[\"threat\"] = r'@threat (?P.*)'\n self.patterns[\"control\"] = r'@control (?P.*)'\n self.patterns[\"component\"] = r'@component (?P.*)'\n\n self.cwd = os.getcwd()\n\n def run_action(self, data, source):\n action = data.pop(\"action\")\n self.action_table[action](data, source=source)\n\n def is_extended(self, line):\n return line[-1] == \":\"\n\n def is_threatspec_line(self, line):\n for key in self.patterns.keys():\n if \"@{}\".format(key) in line:\n return True\n return False\n\n def check_file(self, filename):\n logger.debug(\"Parsing file {}\".format(filename))\n if filename.startswith(self.cwd):\n return filename.replace(self.cwd, \"\", 1).lstrip(os.path.sep)\n return filename\n\n\nclass CommentParser(Parser):\n def __init__(self, threatmodel, mime=None):\n super().__init__(threatmodel)\n self.mime = mime\n\n def parse_comment(self, comment):\n annotations = []\n\n LINE = 0\n EXTENDED = 1\n\n state = LINE\n extended_lines = []\n data = None\n\n line_number = 1\n\n for line in comment.split(\"\\n\"):\n stripped_line = self.strip(line)\n if state == LINE:\n for action in self.patterns.keys():\n if stripped_line.startswith(\"@\" + action):\n data = {\"action\": action, \"line\": line_number, \"annotation\": stripped_line}\n extended_lines = []\n pattern = self.patterns[action]\n if self.is_extended(stripped_line):\n state = EXTENDED\n stripped_line = stripped_line[0:-1]\n m = re.match(pattern, stripped_line, re.M | re.I)\n if m:\n data.update(m.groupdict())\n if state == LINE:\n annotations.append(data)\n else:\n raise Exception(\"Could not parse {} pattern:\\n{} for comment line:\\n{}\".format(action, pattern, line))\n\n elif state == EXTENDED:\n if stripped_line == \"\":\n state = LINE\n self.process_extended_lines(extended_lines, data, annotations)\n extended_lines = []\n else:\n extended_lines.append(self.strip_stars(line))\n\n line_number += 1\n\n if len(extended_lines) > 0:\n self.process_extended_lines(extended_lines, data, annotations)\n\n return annotations\n\n def process_extended_lines(self, extended_lines, data, annotations):\n extended_text = \"\\n\".join(extended_lines)\n data[\"annotation\"] += \"\\n\" + extended_text\n data.update(yaml.load(extended_text, Loader=yaml.SafeLoader))\n annotations.append(data)\n\n def strip(self, line):\n return self.strip_stars(line).strip()\n\n def strip_stars(self, line):\n if self.mime not in [\"text/html\", \"text/x-shellscript\", \"text/xml\"]:\n return re.sub(r\"\\s*\\*+\", \"\", line)\n return line\n\n \nclass TextFileParser(CommentParser):\n def parse_file(self, filename):\n filename = self.check_file(filename)\n\n with open(filename) as fh:\n data = fh.read()\n\n source = {\n \"filename\": filename,\n \"code\": \"\"\n }\n\n for data in self.parse_comment(data):\n source[\"annotation\"] = data.pop(\"annotation\")\n source[\"line\"] = data.pop(\"line\")\n self.run_action(data, source)\n\n\nclass SourceFileParser(CommentParser):\n\n def __init__(self, threatmodel, mime=None):\n super().__init__(threatmodel)\n self.mime = mime\n\n def extract_comment_context(self, lines, commented_lines, start_line, num_lines, multiline=False):\n count = 0\n i = start_line\n code = []\n\n capture_first_line = not multiline\n\n for line in lines[start_line - 1:]:\n if count >= num_lines:\n return \"\".join(code)\n\n if capture_first_line:\n code.append(line)\n capture_first_line = False\n\n if i not in commented_lines:\n code.append(line)\n count += 1\n i += 1\n return 
\"\".join(code)\n\n def get_lines(self, filename):\n try:\n with open(filename) as fh:\n return fh.readlines()\n except UnicodeDecodeError:\n return None\n\n def parse_file(self, filename):\n logger.debug(\"Parsing file {}\".format(filename))\n\n lines = self.get_lines(filename)\n if not lines:\n return\n\n commented_line_numbers = []\n comments = []\n try:\n for comment in comment_parser.extract_comments(filename, self.mime):\n comment_text = comment.text()\n comment_line = comment.line_number()\n if comment.is_multiline():\n offset = len(comment_text.split(\"\\n\"))\n commented_line_numbers += range(comment_line, comment_line + offset)\n else:\n offset = 0\n commented_line_numbers.append(comment_line)\n comments.append({\n \"text\": comment_text,\n \"line\": comment_line,\n \"offset\": offset,\n \"multiline\": comment.is_multiline()\n })\n except comment_parser.UnsupportedError as e:\n print(e)\n return\n\n for comment in comments:\n comment[\"text\"] = comment[\"text\"].strip()\n num_lines = 5 # Get 5 lines of code\n code = self.extract_comment_context(lines, commented_line_numbers, comment[\"line\"] + comment[\"offset\"], num_lines, comment[\"multiline\"])\n\n source = {\n \"code\": code,\n \"filename\": filename\n }\n\n annotations = self.parse_comment(comment[\"text\"])\n if annotations:\n for data in annotations:\n source[\"line\"] = data.pop(\"line\")\n source[\"annotation\"] = data.pop(\"annotation\")\n self.run_action(data, source)\n\n\nclass YamlFileParser(Parser):\n def parse_annotation(self, annotation, data={}):\n stripped_line = annotation.strip()\n for action in self.patterns.keys():\n if stripped_line.startswith(\"@\" + action):\n data[\"action\"] = action\n pattern = self.patterns[action]\n m = re.match(pattern, stripped_line, re.M | re.I)\n if m:\n data.update(m.groupdict())\n return data\n else:\n raise Exception(\"Could not parse {} pattern:\\n{} for comment line:\\n{}\".format(action, pattern, stripped_line))\n\n def parse_key(self, data, parent, filename):\n if isinstance(data, str):\n annotation = self.parse_annotation(data)\n source = {\n \"annotation\": data,\n \"code\": json.dumps(parent, indent=2),\n \"filename\": filename,\n \"line\": 0\n }\n self.run_action(annotation, source)\n elif isinstance(data, list):\n for v in data:\n if not isinstance(v, str):\n raise Exception(\"Invalid value type for x-threatspec list in {}\".format(filename))\n annotation = self.parse_annotation(v)\n source = {\n \"annotation\": v,\n \"code\": json.dumps(parent, indent=2),\n \"filename\": filename,\n \"line\": 0\n }\n self.run_action(annotation, source)\n elif isinstance(data, dict):\n for k, v in data.items():\n annotation = self.parse_annotation(k, v)\n source = {\n \"annotation\": k,\n \"code\": json.dumps(parent, indent=2),\n \"filename\": filename,\n \"line\": 0\n }\n self.run_action(annotation, source)\n\n def parse_data(self, data, parent, filename):\n if isinstance(data, dict):\n for k, v in data.items():\n if k == \"x-threatspec\":\n self.parse_key(v, data, filename)\n else:\n self.parse_data(v, data, filename)\n elif isinstance(data, list):\n for v in data:\n self.parse_data(v, data, filename)\n\n def parse_file(self, filename):\n filename = self.check_file(filename)\n\n with open(filename) as fh:\n file_data = yaml.load(fh, Loader=yaml.SafeLoader)\n self.parse_data(file_data, {}, filename)\n", "repo_name": "threatspec/threatspec", "sub_path": "threatspec/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 10716, "program_lang": "python", "lang": 
"en", "doc_type": "code", "stars": 293, "dataset": "github-code", "pt": "46", "api": [{"api_name": "logging.getLogger", "line_number": 2, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 91, "usage_type": "call"}, {"api_name": "re.M", "line_number": 91, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 91, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 117, "usage_type": "call"}, {"api_name": "yaml.SafeLoader", "line_number": 117, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 125, "usage_type": "call"}, {"api_name": "comment_parser.comment_parser.extract_comments", "line_number": 191, "usage_type": "call"}, {"api_name": "comment_parser.comment_parser", "line_number": 191, "usage_type": "name"}, {"api_name": "comment_parser.comment_parser.UnsupportedError", "line_number": 206, "usage_type": "attribute"}, {"api_name": "comment_parser.comment_parser", "line_number": 206, "usage_type": "name"}, {"api_name": "re.match", "line_number": 235, "usage_type": "call"}, {"api_name": "re.M", "line_number": 235, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 235, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 247, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 259, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 269, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 290, "usage_type": "call"}, {"api_name": "yaml.SafeLoader", "line_number": 290, "usage_type": "attribute"}]} {"seq_id": "26129396775", "text": "from adestis_netbox_plugin_account_management.models import *\nfrom netbox.filtersets import NetBoxModelFilterSet\nfrom django.db.models import Q\n\nfrom dcim.models import *\nfrom utilities.filters import TreeNodeMultipleChoiceFilter\nfrom django.utils.translation import gettext as _\nimport django_filters\nfrom utilities.forms import (\n DynamicModelMultipleChoiceField, MultipleChoiceField, StaticSelect, TagFilterField, BOOLEAN_WITH_BLANK_CHOICES,\n)\nfrom virtualization.models import VirtualMachine, ClusterGroup, Cluster\nfrom tenancy.models import *\n\n__all__ = (\n 'SystemFilterSet',\n)\n\n\nclass SystemFilterSet(NetBoxModelFilterSet):\n \n cluster_group_id = DynamicModelMultipleChoiceField(\n queryset=ClusterGroup.objects.all(),\n required=False,\n label=_('Cluster group (name)')\n ) \n \n cluster_id = DynamicModelMultipleChoiceField(\n queryset=Cluster.objects.all(),\n required=False,\n label=_('Cluster (name)')\n ) \n \n device_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Device.objects.all(),\n label=_('Device (ID)'),\n )\n \n device = django_filters.ModelMultipleChoiceFilter(\n field_name='device__name',\n queryset=Device.objects.all(),\n to_field_name='name',\n label=_('Device (name)'),\n )\n \n virtual_machine_id = DynamicModelMultipleChoiceField(\n queryset=VirtualMachine.objects.all(),\n required=False,\n label=_('Virtual machine (name)'))\n\n group = TreeNodeMultipleChoiceFilter(\n queryset=TenantGroup.objects.all(),\n field_name='group',\n lookup_expr='in',\n to_field_name='group',\n label=_('Tenant group (group)'),\n )\n \n tenant_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Tenant.objects.all(),\n label=_('Tenant (ID)'),\n )\n \n tenant = django_filters.ModelMultipleChoiceFilter(\n queryset=Tenant.objects.all(),\n field_name='tenant__name',\n 
to_field_name='tenant',\n label=_('Tenant (name)'),\n )\n\n class Meta:\n model = System\n fields = ('id', 'tenant', 'group', 'cluster_group_id', 'cluster_id', 'device', 'virtual_machine_id', 'name', 'system_url', 'system_status') \n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(system_url__icontains=value) |\n Q(system_status__icontains=value)\n )\n", "repo_name": "adestis/netbox-account-management", "sub_path": "adestis_netbox_plugin_account_management/filtersets/system.py", "file_name": "system.py", "file_ext": "py", "file_size_in_byte": 2528, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "netbox.filtersets.NetBoxModelFilterSet", "line_number": 20, "usage_type": "name"}, {"api_name": "utilities.forms.DynamicModelMultipleChoiceField", "line_number": 22, "usage_type": "call"}, {"api_name": "virtualization.models.ClusterGroup.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "virtualization.models.ClusterGroup.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "virtualization.models.ClusterGroup", "line_number": 23, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 25, "usage_type": "call"}, {"api_name": "utilities.forms.DynamicModelMultipleChoiceField", "line_number": 28, "usage_type": "call"}, {"api_name": "virtualization.models.Cluster.objects.all", "line_number": 29, "usage_type": "call"}, {"api_name": "virtualization.models.Cluster.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "virtualization.models.Cluster", "line_number": 29, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 31, "usage_type": "call"}, {"api_name": "django_filters.ModelMultipleChoiceFilter", "line_number": 34, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 36, "usage_type": "call"}, {"api_name": "django_filters.ModelMultipleChoiceFilter", "line_number": 39, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 43, "usage_type": "call"}, {"api_name": "utilities.forms.DynamicModelMultipleChoiceField", "line_number": 46, "usage_type": "call"}, {"api_name": "virtualization.models.VirtualMachine.objects.all", "line_number": 47, "usage_type": "call"}, {"api_name": "virtualization.models.VirtualMachine.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "virtualization.models.VirtualMachine", "line_number": 47, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 49, "usage_type": "call"}, {"api_name": "utilities.filters.TreeNodeMultipleChoiceFilter", "line_number": 51, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 56, "usage_type": "call"}, {"api_name": "django_filters.ModelMultipleChoiceFilter", "line_number": 59, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 61, "usage_type": "call"}, {"api_name": "django_filters.ModelMultipleChoiceFilter", "line_number": 64, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 80, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 81, "usage_type": "call"}]} 
{"seq_id": "25740885459", "text": "import numpy as np\nimport pandas as pd\nfrom tqdm.notebook import trange\n\ndef vuln_sim(dists, sim_years, days_in_year=365, sample_count=1000000, clip=True, verbose=False):\n samples = pd.DataFrame({key:val.rvs(sample_count) for key, val in dists.items()})\n \n variants = {}\n active = []\n vuln_days = []\n t = trange(sim_years * days_in_year) if verbose else range(sim_years * days_in_year)\n for i in t:\n var_occur = np.random.choice(samples['occurence']) / days_in_year\n if var_occur > np.random.uniform():\n var_idx = len(variants)\n variants[var_idx] = {\n 'start_day': i,\n 'identification': np.random.choice(samples['identification']),\n 'remediation': np.random.choice(samples['remediation']),\n }\n variants[var_idx]['duration'] = variants[var_idx]['identification'] + variants[var_idx]['remediation']\n active.append(var_idx)\n\n efficacy = np.random.choice(samples['variant']) if len(active) > 0 \\\n else np.random.choice(samples['efficacy'])\n\n if efficacy < np.random.uniform():\n vuln_days.append(i)\n\n for var_idx in [*active]:\n var_end = 1 / variants[var_idx]['duration']\n if var_end > np.random.uniform():\n active.remove(var_idx)\n variants[var_idx]['end_day'] = i\n \n vuln_vals = (pd.Series(vuln_days) // days_in_year).value_counts().reindex(np.arange(1000)).fillna(0) / days_in_year\n if clip: vuln_vals = vuln_vals.clip(1/sim_years, 1 - (1/sim_years))\n \n return vuln_vals, variants", "repo_name": "Calvinxc1/BayesianPlayground", "sub_path": "tools/vuln_sim.py", "file_name": "vuln_sim.py", "file_ext": "py", "file_size_in_byte": 1634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "pandas.DataFrame", "line_number": 6, "usage_type": "call"}, {"api_name": "tqdm.notebook.trange", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 36, "usage_type": "call"}]} {"seq_id": "1436880719", "text": "import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nimport os\nimport json\nfrom datetime import datetime\n\nnow = datetime.now()\ndt_string = now.strftime(\"%d-%m-%Y_%H:%M:%S\")\n \ncred = 
credentials.Certificate(\"vinterprosjekt-it2-firebase-adminsdk-wi0bg-f0efd8881e.json\")\nfirebase_admin.initialize_app(cred, {'databaseURL':\"https://vinterprosjekt-it2-default-rtdb.europe-west1.firebasedatabase.app/\"})\n\nref = db.reference(\"/bazaar\")\n\nwith open(\"bazaar_info.json\", \"r\") as f:\n\tfile_contents = json.load(f)\n# ref.set(file_contents)\n\nref.child(dt_string).set(file_contents)\n\nos.remove(\"bazaar_info.json\")\n\nref = db.reference(\"/auctions\")\n\nwith open(\"auctions_info.json\", \"r\") as f:\n\tfile_contents = json.load(f)\n# ref.set(file_contents)\n\nref.child(dt_string).set(file_contents)\n\nos.remove(\"auctions_info.json\")\n\n# data_push = ref.push().set(file_contents)\n\n# post_id = data_push.key\n", "repo_name": "johahold/prosjekt_vinterferie", "sub_path": "data_management/push_to_database.py", "file_name": "push_to_database.py", "file_ext": "py", "file_size_in_byte": 925, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "datetime.datetime.now", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "name"}, {"api_name": "firebase_admin.credentials.Certificate", "line_number": 11, "usage_type": "call"}, {"api_name": "firebase_admin.credentials", "line_number": 11, "usage_type": "name"}, {"api_name": "firebase_admin.initialize_app", "line_number": 12, "usage_type": "call"}, {"api_name": "firebase_admin.db.reference", "line_number": 14, "usage_type": "call"}, {"api_name": "firebase_admin.db", "line_number": 14, "usage_type": "name"}, {"api_name": "json.load", "line_number": 17, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 22, "usage_type": "call"}, {"api_name": "firebase_admin.db.reference", "line_number": 24, "usage_type": "call"}, {"api_name": "firebase_admin.db", "line_number": 24, "usage_type": "name"}, {"api_name": "json.load", "line_number": 27, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 32, "usage_type": "call"}]} {"seq_id": "39277673078", "text": "from __future__ import absolute_import, unicode_literals\n\nimport json\nimport logging\n\nfrom django.conf import settings\nfrom django.db.models import Q, Count\nfrom django.core.management.base import BaseCommand\n\nfrom muses.collection.models import Item\nfrom muses.naive_classification.helpers_os import predict_image_path_dict\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n \"\"\"Classify.\"\"\"\n\n help = \"Classify items with our AI.\"\n\n requires_system_checks = False\n\n def add_arguments(self, parser):\n parser.add_argument('--update-existing',\n action='store_true',\n dest='update_existing',\n default=False,\n help=\"Update existing classifications.\")\n\n def handle(self, *args, **options):\n \"\"\"Handle.\n\n :param args:\n :param options:\n :return:\n \"\"\"\n update_existing = bool(options['update_existing'])\n\n filters = []\n if not update_existing:\n for field in ['classified_as']:\n filters.append(\n Q(**{\"{}__isnull\".format(field): True})\n | Q(**{\"{}__exact\".format(field): ''})\n )\n\n items = Item \\\n .objects \\\n .filter(*filters) \\\n .prefetch_related('images') \\\n .annotate(num_images=Count('images')) \\\n .filter(num_images__gt=0)\n\n for item in items:\n paths = []\n for image in item.images.all():\n try:\n paths.append(image.image.path)\n except Exception as err:\n LOGGER.warning(err)\n\n conf = settings \\\n .MUSES_CONFIG['classification']['naive_classification']\n 
model_path = conf['model_path']\n\n try:\n classification = predict_image_path_dict(\n paths,\n model_path=model_path\n )\n except Exception as err:\n LOGGER.warning(err)\n continue\n\n top_results = list(classification.items())[:5]\n if top_results:\n try:\n item.classified_as = top_results\n item.save()\n except Exception as err:\n pass\n", "repo_name": "Aincient/cleo", "sub_path": "src/muses/collection/management/commands/muses_classify.py", "file_name": "muses_classify.py", "file_ext": "py", "file_size_in_byte": 2379, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 44, "usage_type": "call"}, {"api_name": "muses.collection.models.Item.objects.filter", "line_number": 47, "usage_type": "call"}, {"api_name": "muses.collection.models.Item.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "muses.collection.models.Item", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 51, "usage_type": "call"}, {"api_name": "django.conf.settings.MUSES_CONFIG", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 62, "usage_type": "name"}, {"api_name": "muses.naive_classification.helpers_os.predict_image_path_dict", "line_number": 67, "usage_type": "call"}]} {"seq_id": "29946361664", "text": "\"\"\"Provides a ChatBot UI for a Github Repository. Powered by Llama Index and Panel\"\"\"\nimport os\nimport pickle\nfrom pathlib import Path\n\nimport nest_asyncio\nimport panel as pn\nimport param\nfrom llama_index import VectorStoreIndex, download_loader\n\nfrom llama_hub.github_repo import GithubClient, GithubRepositoryReader\n\n# needed because both Panel and GithubRepositoryReader starts up the ioloop\nnest_asyncio.apply()\n\nCACHE_PATH = Path(\".cache/panel_chatbot\")\nCACHE_PATH.mkdir(parents=True, exist_ok=True)\n\nCHAT_GPT_LOGO = \"https://upload.wikimedia.org/wikipedia/commons/thumb/0/04/ChatGPT_logo.svg/512px-ChatGPT_logo.svg.png\"\nCHAT_GPT_URL = \"https://chat.openai.com/\"\nLLAMA_INDEX_LOGO = (\n \"https://cdn-images-1.medium.com/max/280/1*_mrG8FG_LiD23x0-mEtUkw@2x.jpeg\"\n)\nPANEL_LOGO = {\n \"default\": \"https://panel.holoviz.org/_static/logo_horizontal_light_theme.png\",\n \"dark\": \"https://panel.holoviz.org/_static/logo_horizontal_dark_theme.png\",\n}\n\nGITHUB_LOGO = \"https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png\"\nGITHUB_URL = \"https://github.com/\"\nLLAMA_INDEX_URL = \"https://www.llamaindex.ai/\"\nPANEL_URL = \"https://panel.holoviz.org/index.html\"\nGITHUB_COPILOT_LOGO = (\n \"https://plugins.jetbrains.com/files/17718/447537/icon/pluginIcon.svg\"\n)\n\nINDEX_NOT_LOADED = \"No repository loaded\"\nINDEX_LOADED = \"Repository loaded\"\nLOADING_EXISTING_DOCS = \"Loading existing docs\"\nLOADING_NEW_DOCS = \"Downloading documents\"\nLOADING_EXISTING_INDEX = \"Loading existing index\"\nLOADING_NEW_INDEX = \"Creating index\"\nCUTE_LLAMA = \"https://raw.githubusercontent.com/run-llama/llama-hub/main/llama_hub/llama_packs/panel_chatbot/llama_by_sophia_yang.png\"\nCUTE_LLAMA_URL = \"https://x.com/sophiamyang/status/1729810715467252080?s=20\"\n\npn.chat.ChatMessage.default_avatars.update(\n {\n 
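Because the classifier above is a Django management command, it can be driven from the shell or from code. A sketch, assuming the muses app is installed and MUSES_CONFIG points at a trained model:

# Shell: python manage.py muses_classify --update-existing
from django.core.management import call_command

call_command('muses_classify')                        # classify only unclassified items
call_command('muses_classify', update_existing=True)  # overwrite existing classifications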
\"assistant\": GITHUB_COPILOT_LOGO,\n \"user\": \"🦙\",\n }\n)\npn.chat.ChatMessage.show_reaction_icons = False\n\nACCENT = \"#ec4899\"\n\nCSS_FIXES_TO_BE_UPSTREAMED_TO_PANEL = \"\"\"\n#sidebar {\n padding-left: 5px !important;\n background-color: var(--panel-surface-color);\n}\n.pn-wrapper {\n height: calc( 100vh - 150px);\n}\n.bk-active.bk-btn-primary {border-color: var(--accent-fill-active)}\n.bk-btn-primary:hover {border-color: var(--accent-fill-hover)}\n.bk-btn-primary {border-color: var(--accent-fill-rest)}\na {color: var(--accent-fill-rest) !important;}\na:hover {color: var(--accent-fill-hover) !important;}\n\"\"\"\n\n\ndef _split_and_clean(cstext):\n return cstext.split(\",\")\n\n\nclass IndexLoader(pn.viewable.Viewer):\n \"\"\"The IndexLoader enables the user to interactively create a VectorStoreIndex from a\n github repository of choice\"\"\"\n\n value: VectorStoreIndex = param.ClassSelector(class_=VectorStoreIndex)\n\n status = param.String(constant=True, doc=\"A status message\")\n\n owner: str = param.String(\n default=\"holoviz\", doc=\"The repository owner. For example 'holoviz'\"\n )\n repo: str = param.String(\n default=\"panel\", doc=\"The repository name. For example 'panel'\"\n )\n filter_directories: str = param.String(\n default=\"examples,docs,panel\",\n label=\"Folders\",\n doc=\"A comma separated list of folders to include. For example 'examples,docs,panel'\",\n )\n filter_file_extensions: str = param.String(\n default=\".py,.md,.ipynb\",\n label=\"File Extensions\",\n doc=\"A comma separated list of file extensions to include. For example '.py,.md,.ipynb'\",\n )\n\n _load = param.Event(\n label=\"LOAD\",\n doc=\"Loads the repository index from the cache if it exists and otherwise from scratch\",\n )\n _reload = param.Event(\n default=False,\n label=\"RELOAD ALL\",\n doc=\"Loads the repository index from scratch\",\n )\n\n def __init__(self):\n super().__init__()\n\n if self.index_exists:\n pn.state.execute(self.load)\n else:\n self._update_status(INDEX_NOT_LOADED)\n\n self._layout = pn.Column(\n self.param.owner,\n self.param.repo,\n self.param.filter_directories,\n self.param.filter_file_extensions,\n pn.pane.HTML(self.github_url),\n pn.widgets.Button.from_param(\n self.param._load,\n button_type=\"primary\",\n disabled=self._is_loading,\n loading=self._is_loading,\n ),\n pn.widgets.Button.from_param(\n self.param._reload,\n button_type=\"primary\",\n button_style=\"outline\",\n disabled=self._is_loading,\n loading=self._is_loading,\n ),\n pn.pane.Markdown(\"### Status\", margin=(3, 5)),\n pn.pane.Str(self.param.status),\n )\n\n def __panel__(self):\n return self._layout\n\n @property\n def _unique_id(self):\n uid = (\n self.owner\n + self.repo\n + self.filter_directories\n + self.filter_file_extensions\n )\n uid = uid.replace(\",\", \"\").replace(\".\", \"\")\n return uid\n\n @property\n def _cached_docs_path(self):\n return CACHE_PATH / f\"docs_{self._unique_id}.pickle\"\n\n @property\n def _cached_index_path(self):\n return CACHE_PATH / f\"index_{self._unique_id}.pickle\"\n\n async def _download_docs(self):\n download_loader(\"GithubRepositoryReader\")\n\n github_client = GithubClient(os.getenv(\"GITHUB_TOKEN\"))\n\n filter_directories = _split_and_clean(self.filter_directories)\n filter_file_extensions = _split_and_clean(self.filter_file_extensions)\n\n loader = GithubRepositoryReader(\n github_client,\n owner=self.owner,\n repo=self.repo,\n filter_directories=(\n filter_directories,\n GithubRepositoryReader.FilterType.INCLUDE,\n ),\n 
filter_file_extensions=(\n                filter_file_extensions,\n                GithubRepositoryReader.FilterType.INCLUDE,\n            ),\n            verbose=True,\n            concurrent_requests=10,\n        )\n        return loader.load_data(branch=\"main\")\n\n    async def _get_docs(self):\n        docs_path = self._cached_docs_path\n        index_path = self._cached_index_path\n\n        if docs_path.exists():\n            self._update_status(LOADING_EXISTING_DOCS)\n            with docs_path.open(\"rb\") as f:\n                return pickle.load(f)\n\n        self._update_status(LOADING_NEW_DOCS)\n        docs = await self._download_docs()\n\n        with docs_path.open(\"wb\") as f:\n            pickle.dump(docs, f, pickle.HIGHEST_PROTOCOL)\n\n        if index_path.exists():\n            index_path.unlink()\n\n        return docs\n\n    async def _create_index(self, docs):\n        return VectorStoreIndex.from_documents(docs, use_async=True)\n\n    async def _get_index(self, docs):\n        index_path = self._cached_index_path\n\n        if index_path.exists():\n            self._update_status(LOADING_EXISTING_INDEX)\n            with index_path.open(\"rb\") as f:\n                return pickle.load(f)\n\n        self._update_status(LOADING_NEW_INDEX)\n        index = await self._create_index(docs)\n\n        with index_path.open(\"wb\") as f:\n            pickle.dump(index, f, pickle.HIGHEST_PROTOCOL)\n        return index\n\n    @param.depends(\"status\")\n    def _is_loading(self):\n        return self.status not in [INDEX_LOADED, INDEX_NOT_LOADED]\n\n    @param.depends(\"status\")\n    def _is_not_loading(self):\n        return self.status in [INDEX_LOADED, INDEX_NOT_LOADED]\n\n    @param.depends(\"_load\", watch=True)\n    async def load(self):\n        \"\"\"Loads the repository index either from the cache or by downloading from\n        the repository\"\"\"\n        self._update_status(\"Loading ...\")\n        self.value = None\n\n        docs = await self._get_docs()\n        self.value = await self._get_index(docs)\n        self._update_status(INDEX_LOADED)\n\n    @param.depends(\"_reload\", watch=True)\n    async def reload(self):\n        self._update_status(\"Deleting cached index ...\")\n        if self._cached_docs_path.exists():\n            self._cached_docs_path.unlink()\n        if self._cached_index_path.exists():\n            self._cached_index_path.unlink()\n\n        await self.load()\n\n    def _update_status(self, text):\n        with param.edit_constant(self):\n            self.status = text\n        print(text)\n\n    @param.depends(\"owner\", \"repo\")\n    def github_url(self):\n        \"\"\"Returns a html string with a link to the github repository\"\"\"\n        text = f\"{self.owner}/{self.repo}\"\n        href = f\"https://github.com/{text}\"\n        return f\"<a href='{href}' target='_blank'>{text}</a>\"\n\n    @property\n    def index_exists(self):\n        \"\"\"Returns True if the index already exists\"\"\"\n        return self._cached_docs_path.exists() and self._cached_index_path.exists()\n\n\ndef powered_by():\n    \"\"\"Returns a component describing the frameworks powering the chat ui\"\"\"\n    params = {\"height\": 40, \"sizing_mode\": \"fixed\", \"margin\": (0, 10)}\n    return pn.Column(\n        pn.pane.Markdown(\"### AI Powered By\", margin=(10, 5, 10, 0)),\n        pn.Row(\n            pn.pane.Image(LLAMA_INDEX_LOGO, link_url=LLAMA_INDEX_URL, **params),\n            pn.pane.Image(CHAT_GPT_LOGO, link_url=CHAT_GPT_URL, **params),\n            pn.pane.Image(PANEL_LOGO[pn.config.theme], link_url=PANEL_URL, **params),\n            align=\"center\",\n        ),\n    )\n\n\nasync def chat_component(index: VectorStoreIndex, index_loader: IndexLoader):\n    \"\"\"Returns the chat component powering the main area of the application\"\"\"\n    if not index:\n        return pn.Column(\n            pn.chat.ChatMessage(\n                \"You are now a *GitHub Repository assistant*.\",\n                user=\"System\",\n            ),\n            pn.chat.ChatMessage(\n                \"Please **load a GitHub Repository** to start chatting with me. 
This can take from seconds to minutes!\",\n                user=\"Assistant\",\n            ),\n        )\n\n    chat_engine = index.as_chat_engine(chat_mode=\"context\", verbose=True)\n\n    async def generate_response(contents, user, instance):\n        response = await chat_engine.astream_chat(contents)\n        text = \"\"\n        async for token in response.async_response_gen():\n            text += token\n            yield text\n\n    chat_interface = pn.chat.ChatInterface(\n        callback=generate_response,\n        sizing_mode=\"stretch_both\",\n    )\n    chat_interface.send(\n        pn.chat.ChatMessage(\n            \"You are now a *GitHub Repository Assistant*.\", user=\"System\"\n        ),\n        respond=False,\n    )\n    chat_interface.send(\n        pn.chat.ChatMessage(\n            f\"Hello! You can ask me anything about {index_loader.github_url()}.\",\n            user=\"Assistant\",\n        ),\n        respond=False,\n    )\n    return chat_interface\n\n\ndef settings_components(index_loader: IndexLoader):\n    \"\"\"Returns a list of the components to add to the sidebar\"\"\"\n    return [\n        pn.pane.Image(\n            CUTE_LLAMA,\n            height=250,\n            align=\"center\",\n            margin=(10, 5, 25, 5),\n            link_url=CUTE_LLAMA_URL,\n        ),\n        \"## Github Repository\",\n        index_loader,\n        powered_by(),\n    ]\n\n\ndef create_chat_ui():\n    \"\"\"Returns the Chat UI\"\"\"\n    pn.extension(\n        sizing_mode=\"stretch_width\", raw_css=[CSS_FIXES_TO_BE_UPSTREAMED_TO_PANEL]\n    )\n\n    index_loader = IndexLoader()\n\n    pn.state.location.sync(\n        index_loader,\n        parameters={\n            \"owner\": \"owner\",\n            \"repo\": \"repo\",\n            \"filter_directories\": \"folders\",\n            \"filter_file_extensions\": \"file_extensions\",\n        },\n    )\n\n    bound_chat_interface = pn.bind(\n        chat_component, index=index_loader.param.value, index_loader=index_loader\n    )\n\n    return pn.template.FastListTemplate(\n        title=\"Chat with GitHub\",\n        sidebar=settings_components(index_loader),\n        main=[bound_chat_interface],\n        accent=ACCENT,\n        main_max_width=\"1000px\",\n        main_layout=None,\n    )\n\n\nif pn.state.served:\n    create_chat_ui().servable()\n", "repo_name": "run-llama/llama-hub", "sub_path": "llama_hub/llama_packs/panel_chatbot/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 12046, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2565, "dataset": "github-code", "pt": "46", "api": [{"api_name": "nest_asyncio.apply", "line_number": 14, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 16, "usage_type": "call"}, {"api_name": "panel.chat.ChatMessage.default_avatars.update", "line_number": 46, "usage_type": "call"}, {"api_name": "panel.chat", "line_number": 46, "usage_type": "attribute"}, {"api_name": "panel.chat", "line_number": 52, "usage_type": "attribute"}, {"api_name": "panel.viewable", "line_number": 76, "usage_type": "attribute"}, {"api_name": "llama_index.VectorStoreIndex", "line_number": 80, "usage_type": "name"}, {"api_name": "param.ClassSelector", "line_number": 80, "usage_type": "call"}, {"api_name": "param.String", "line_number": 82, "usage_type": "call"}, {"api_name": "param.String", "line_number": 84, "usage_type": "call"}, {"api_name": "param.String", "line_number": 87, "usage_type": "call"}, {"api_name": "param.String", "line_number": 90, "usage_type": "call"}, {"api_name": "param.String", "line_number": 95, "usage_type": "call"}, {"api_name": "param.Event", "line_number": 101, "usage_type": "call"}, {"api_name": "param.Event", "line_number": 105, "usage_type": "call"}, {"api_name": "panel.state.execute", "line_number": 115, "usage_type": "call"}, {"api_name": "panel.state", "line_number": 115, "usage_type": "attribute"}, {"api_name": "panel.Column", "line_number": 119, "usage_type": 
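The app.py above only builds the UI when it is served; a hypothetical launch sketch (the module name app, the port, and the token placeholder are all assumptions, and GithubClient needs a real GITHUB_TOKEN):

import os
os.environ.setdefault("GITHUB_TOKEN", "...")  # placeholder, set a real token

import panel as pn
from app import create_chat_ui  # assumed module name for the file above

pn.serve(create_chat_ui, port=5006, show=True)
# or, from a shell: panel serve app.py --show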
"call"}, {"api_name": "panel.pane.HTML", "line_number": 124, "usage_type": "call"}, {"api_name": "panel.pane", "line_number": 124, "usage_type": "attribute"}, {"api_name": "panel.widgets.Button.from_param", "line_number": 125, "usage_type": "call"}, {"api_name": "panel.widgets", "line_number": 125, "usage_type": "attribute"}, {"api_name": "panel.widgets.Button.from_param", "line_number": 131, "usage_type": "call"}, {"api_name": "panel.widgets", "line_number": 131, "usage_type": "attribute"}, {"api_name": "panel.pane.Markdown", "line_number": 138, "usage_type": "call"}, {"api_name": "panel.pane", "line_number": 138, "usage_type": "attribute"}, {"api_name": "panel.pane.Str", "line_number": 139, "usage_type": "call"}, {"api_name": "panel.pane", "line_number": 139, "usage_type": "attribute"}, {"api_name": "llama_index.download_loader", "line_number": 165, "usage_type": "call"}, {"api_name": "llama_hub.github_repo.GithubClient", "line_number": 167, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 167, "usage_type": "call"}, {"api_name": "llama_hub.github_repo.GithubRepositoryReader", "line_number": 172, "usage_type": "call"}, {"api_name": "llama_hub.github_repo.GithubRepositoryReader.FilterType", "line_number": 178, "usage_type": "attribute"}, {"api_name": "llama_hub.github_repo.GithubRepositoryReader", "line_number": 178, "usage_type": "name"}, {"api_name": "llama_hub.github_repo.GithubRepositoryReader.FilterType", "line_number": 182, "usage_type": "attribute"}, {"api_name": "llama_hub.github_repo.GithubRepositoryReader", "line_number": 182, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 196, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 202, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 202, "usage_type": "attribute"}, {"api_name": "llama_index.VectorStoreIndex.from_documents", "line_number": 210, "usage_type": "call"}, {"api_name": "llama_index.VectorStoreIndex", "line_number": 210, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 218, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 224, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 224, "usage_type": "attribute"}, {"api_name": "param.depends", "line_number": 227, "usage_type": "call"}, {"api_name": "param.depends", "line_number": 231, "usage_type": "call"}, {"api_name": "param.depends", "line_number": 235, "usage_type": "call"}, {"api_name": "param.depends", "line_number": 246, "usage_type": "call"}, {"api_name": "param.edit_constant", "line_number": 257, "usage_type": "call"}, {"api_name": "param.depends", "line_number": 261, "usage_type": "call"}, {"api_name": "panel.Column", "line_number": 277, "usage_type": "call"}, {"api_name": "panel.pane.Markdown", "line_number": 278, "usage_type": "call"}, {"api_name": "panel.pane", "line_number": 278, "usage_type": "attribute"}, {"api_name": "panel.Row", "line_number": 279, "usage_type": "call"}, {"api_name": "panel.pane.Image", "line_number": 280, "usage_type": "call"}, {"api_name": "panel.pane", "line_number": 280, "usage_type": "attribute"}, {"api_name": "panel.pane.Image", "line_number": 281, "usage_type": "call"}, {"api_name": "panel.pane", "line_number": 281, "usage_type": "attribute"}, {"api_name": "panel.pane.Image", "line_number": 282, "usage_type": "call"}, {"api_name": "panel.pane", "line_number": 282, "usage_type": "attribute"}, {"api_name": "panel.config", "line_number": 282, "usage_type": "attribute"}, {"api_name": 
"llama_index.VectorStoreIndex", "line_number": 288, "usage_type": "name"}, {"api_name": "panel.Column", "line_number": 291, "usage_type": "call"}, {"api_name": "panel.chat.ChatMessage", "line_number": 292, "usage_type": "call"}, {"api_name": "panel.chat", "line_number": 292, "usage_type": "attribute"}, {"api_name": "panel.chat.ChatMessage", "line_number": 296, "usage_type": "call"}, {"api_name": "panel.chat", "line_number": 296, "usage_type": "attribute"}, {"api_name": "panel.chat.ChatInterface", "line_number": 311, "usage_type": "call"}, {"api_name": "panel.chat", "line_number": 311, "usage_type": "attribute"}, {"api_name": "panel.chat.ChatMessage", "line_number": 316, "usage_type": "call"}, {"api_name": "panel.chat", "line_number": 316, "usage_type": "attribute"}, {"api_name": "panel.chat.ChatMessage", "line_number": 322, "usage_type": "call"}, {"api_name": "panel.chat", "line_number": 322, "usage_type": "attribute"}, {"api_name": "panel.pane.Image", "line_number": 334, "usage_type": "call"}, {"api_name": "panel.pane", "line_number": 334, "usage_type": "attribute"}, {"api_name": "panel.extension", "line_number": 349, "usage_type": "call"}, {"api_name": "panel.state.location.sync", "line_number": 355, "usage_type": "call"}, {"api_name": "panel.state", "line_number": 355, "usage_type": "attribute"}, {"api_name": "panel.bind", "line_number": 365, "usage_type": "call"}, {"api_name": "panel.template.FastListTemplate", "line_number": 369, "usage_type": "call"}, {"api_name": "panel.template", "line_number": 369, "usage_type": "attribute"}, {"api_name": "panel.state", "line_number": 379, "usage_type": "attribute"}]} {"seq_id": "28588191341", "text": "# - *- coding: utf- 8 - *-\nimport time\n\nfrom pepper.robot import Pepper\ntry:\n import urllib\nexcept:\n import urllib.request as urllib\nimport base64\nimport json\nfrom PIL import Image\nimport random\n\n\ndef uploadPhotoToWeb(photo):\n \"\"\"we need to upload photo to web as we (me) are not able to open it from local folder\"\"\"\n f = open(photo, \"rb\") # open our image file as read only in binary mode\n image_data = f.read() # read in our image file\n b64_image = base64.standard_b64encode(image_data)\n client_id = \"af482612ae6d1c1\" # this the id which we've got after registrating the app on imgur\n headers = {'Authorization': 'Client-ID ' + client_id}\n data = {'image': b64_image, 'title': 'test'}\n request = urllib.Request(url=\"https://api.imgur.com/3/upload.json\", data=urllib.urlencode(data),\n headers=headers)\n response = urllib.urlopen(request).read()\n parse = json.loads(response)\n return parse['data']['link'] #returns a url of the photo\n\n\ndef getRandName():\n \"\"\"returns a random name for the picture in order not to replace the old photo\"\"\"\n randNum = random.randint(0, 1000)\n return \"demoPictures/photo\" + str(randNum) + \".png\"\n\n\nclass PepperDemo:\n def __init__(self, ip_address, port=9559):\n self.robot = None\n self.robot = Pepper(ip_address, port)\n self.robot.set_czech_language()\n self.photoName = None\n self.greetings = [\"Good afternoon\", \"Hello\", \"Hi\", \"Hello everobody\", \"Welcome\"]\n self.asks = [\"May I photograph you?\",\"May I take your picture?\", \"Do you want to make your picture?\"]\n\n\n def wantToTakePic(self):\n \"\"\"recognise answer with google speech reco\"\"\"\n answers = {\"no\": [\"no\", \"no way\", \"not\", \"no no\", \" i don't\", \"i dont know\", \"not today\", \"later\", \"tommorow\"],\n \"yes\": [\"yes\", \"definitely\", \"yep\", \"ok\", \"okey dokey\", \"sure\", \"all yes\", 
\"you must\",\n \"absolutely\", \"i want\", \"i think so\", \"i agree\", \"if you want\", \"if you insist\", \"probably\", \"maybe\",\n \"yes sir\"]}\n recorded = self.robot.recognize_google(lang=\"en-US\")\n answer = self.getAnswer(answers, recorded)\n if answer == \"no\":\n return False\n elif answer == \"yes\":\n return True\n else:\n return None\n\n\n def getAnswer(self, dic, recorded):\n \"\"\"looks for a recorded answer in a dictionar\"\"\"\n for x in dic.keys():\n if dic[x] in recorded.lower():\n return x\n return None\n\n def welcomeAndAsk(self):\n self.robot.say(random.choice(self.greetings))\n self.robot.greet()\n self.robot.say(random.choice(self.asks))\n\n def takePicture(self):\n self.robot.subscribe_camera(\"camera_top\", 2, 30)\n img = self.robot.get_camera_frame(show=False)\n self.robot.unsubscribe_camera()\n self.robot.play_sound(\"/home/nao/camera1.ogg\")\n im = Image.fromarray(img)\n self.photoName = getRandName()\n im.save(self.photoName)\n\n def showPicture(self):\n link = uploadPhotoToWeb(self.photoName)\n self.robot.show_image(link)\n time.sleep(5)\n self.robot.reset_tablet()\n\n def recogniseAnswerAndDecide(self):\n isTakePic = self.wantToTakePic()\n if isTakePic:\n self.robot.say(\"Perfect. On your marks. 3, 2, 1 .\")\n self.takePicture()\n self.showPicture()\n elif isTakePic is None:\n self.robot.say(\"Sorry, I did not understand you. Please repeat.\")\n self.recogniseAnswerAndDecide()\n else:\n self.robot.say(\"Maybe next time\")\n\n def dealWithRecoErrors(self):\n \"\"\"there is a modifiable grammar error sometimes occurred.\n In order to deal with it you should change language to english and back\"\"\"\n self.robot.set_english_language()\n self.robot.set_czech_language()\n\n def run(self):\n self.dealWithRecoErrors()\n self.welcomeAndAsk()\n self.recogniseAnswerAndDecide()\n\nif __name__ == \"__main__\":\n pepperDemo = PepperDemo(\"10.37.1.232\")\n pepperDemo.run()\n\n", "repo_name": "incognite-lab/Pepper-Controller", "sub_path": "demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 4158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 23, "dataset": "github-code", "pt": "46", "api": [{"api_name": "base64.standard_b64encode", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 23, "usage_type": "name"}, {"api_name": "urllib.request.urlencode", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 25, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 25, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "pepper.robot.Pepper", "line_number": 39, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 70, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 72, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 79, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 79, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 86, "usage_type": "call"}]} {"seq_id": "23186544917", "text": "from bson import ObjectId\nfrom dataclasses import field\nfrom typing import Optional\n\nfrom aenum import Enum\nfrom isodate import parse_duration\n\nfrom extensions.authorization.models.authorized_user import AuthorizedUser\nfrom 
extensions.authorization.models.invitation import Invitation, InvitationType\nfrom extensions.authorization.models.role.default_roles import DefaultRoles\nfrom extensions.authorization.models.role.role import RoleName\nfrom extensions.authorization.validators import (\n check_role_id_valid_for_organization,\n is_common_role,\n)\nfrom extensions.common.sort import SortField\nfrom sdk.auth.use_case.auth_request_objects import BaseAuthRequestObject\nfrom sdk.common.exceptions.exceptions import InvalidRequestException, PermissionDenied\nfrom sdk.common.localization.utils import Language\nfrom sdk.common.usecase.request_object import RequestObject\nfrom sdk.common.utils import inject\nfrom sdk.common.utils.convertible import (\n convertibleclass,\n meta,\n required_field,\n positive_integer_field,\n default_field,\n)\nfrom sdk.common.utils.inject import autoparams\nfrom sdk.common.utils.validators import (\n validate_id,\n validate_email_list,\n validate_object_id,\n validate_object_ids,\n must_be_present,\n must_not_be_present,\n validate_email,\n incorrect_language_to_default,\n must_be_at_least_one_of,\n not_empty,\n)\nfrom sdk.phoenix.config.server_config import Client\n\nINVITATION_PERMISSIONS_PER_ROLE: dict[str, list[str]] = {\n RoleName.ADMINISTRATOR: [\n RoleName.ADMINISTRATOR,\n RoleName.CLINICIAN,\n RoleName.SUPERVISOR,\n RoleName.SUPPORT,\n RoleName.USER,\n ],\n RoleName.CLINICIAN: [RoleName.USER, RoleName.PROXY],\n}\n\n\ndef validate_role_id(field_value: str) -> bool:\n if validate_default_role_id(field_value):\n return True\n if validate_id(field_value):\n return True\n return False\n\n\n@autoparams(\"default_roles\")\ndef validate_default_role_id(value: str, default_roles: DefaultRoles) -> bool:\n return value in default_roles\n\n\n@convertibleclass\nclass SendInvitationRequestObject(RequestObject):\n INVITATION = \"invitation\"\n CLIENT = \"client\"\n SENDER = \"sender\"\n LANGUAGE = \"language\"\n EXTRA_INFO = \"extraInfo\"\n\n invitation: Invitation = required_field()\n client: Client = required_field()\n sender: AuthorizedUser = required_field()\n language: str = field(\n default=Language.EN, metadata=meta(value_to_field=incorrect_language_to_default)\n )\n extraInfo: dict = default_field()\n\n\n@convertibleclass\nclass SendInvitationsRequestObject(BaseAuthRequestObject):\n DEPLOYMENT_IDS = \"deploymentIds\"\n ORGANIZATION_ID = \"organizationId\"\n EMAILS = \"emails\"\n ROLE_ID = \"roleId\"\n PATIENT_ID = \"patientId\"\n EXPIRES_IN = \"expiresIn\"\n SUBMITTER = \"submitter\"\n\n emails: list[str] = required_field(metadata=meta(validate_email_list))\n roleId: str = required_field(metadata=meta(validate_role_id))\n organizationId: str = default_field(metadata=meta(validate_object_id))\n deploymentIds: list[str] = default_field(metadata=meta(validate_object_ids))\n patientId: str = default_field(metadata=meta(validate_object_id))\n expiresIn: str = field(default=\"P1W\", metadata=meta(parse_duration))\n submitter: AuthorizedUser = required_field()\n\n @classmethod\n def validate(cls, instance):\n super().validate(instance)\n is_org_role = check_role_id_valid_for_organization(\n instance.roleId, instance.organizationId\n )\n is_proxy_role = instance.roleId == RoleName.PROXY\n is_common_role = instance.roleId in RoleName.common_roles()\n if is_common_role:\n must_be_at_least_one_of(\n organizationId=instance.organizationId,\n deploymentIds=instance.deploymentIds,\n )\n must_not_be_present(patientId=instance.patientId)\n\n if instance.deploymentIds and len(instance.deploymentIds) > 
1:\n must_be_present(organizationId=instance.organizationId)\n elif is_org_role:\n must_be_present(organizationId=instance.organizationId)\n must_not_be_present(deploymentIds=instance.deploymentIds)\n must_not_be_present(patientId=instance.patientId)\n elif is_proxy_role:\n must_be_present(patientId=instance.patientId)\n must_not_be_present(deploymentIds=instance.deploymentIds)\n must_not_be_present(organizationId=instance.organizationId)\n else:\n must_not_be_present(patientId=instance.patientId)\n must_be_present(deploymentIds=instance.deploymentIds)\n if len(instance.deploymentIds) == 0:\n msg = f\"Must be invited to at least one deployment\"\n raise InvalidRequestException(msg)\n\n multiple_deployment_role = (\n instance.roleId in cls.multiple_deployment_roles()\n )\n\n if multiple_deployment_role and len(instance.deploymentIds) > 1:\n must_be_present(organizationId=instance.organizationId)\n\n if not multiple_deployment_role and len(instance.deploymentIds) > 1:\n msg = f\"Role {instance.roleId} can only be invited to one deployment\"\n raise InvalidRequestException(msg)\n\n def check_permission(self, submitter: AuthorizedUser):\n submitter_role = submitter.get_role()\n\n if submitter.is_super_admin():\n return\n\n if is_common_role(submitter_role.id):\n if not self.roleId == RoleName.PROXY:\n if not submitter.role_assignment.is_org():\n if not self.deploymentIds:\n raise PermissionDenied\n else:\n self.validate_resource_access(\n \"deployment\", self.deploymentIds, submitter\n )\n else:\n if self.deploymentIds:\n self.validate_resource_access(\n \"deployment\", self.deploymentIds, submitter\n )\n elif self.organizationId:\n self.validate_resource_access(\n \"organization\", [self.organizationId], submitter\n )\n\n allowed_roles = INVITATION_PERMISSIONS_PER_ROLE.get(submitter_role.id) or []\n if ObjectId.is_valid(self.roleId):\n allowed_roles.append(self.roleId)\n if self.roleId not in allowed_roles:\n raise PermissionDenied\n\n @staticmethod\n def validate_resource_access(\n resource_name: str, resources: list[str], submitter: AuthorizedUser\n ):\n allowed_resources = []\n\n if resource_name == \"organization\":\n allowed_resources = submitter.organization_ids()\n elif resource_name == \"deployment\":\n allowed_resources = submitter.deployment_ids()\n\n if not all(resource_id in allowed_resources for resource_id in resources):\n raise PermissionDenied\n\n @staticmethod\n def multiple_deployment_roles():\n org_keys = inject.instance(DefaultRoles).organization.keys()\n return (set(org_keys) - set(RoleName.org_roles())).union(\n set(RoleName.common_roles())\n )\n\n @property\n def deployment_id(self) -> Optional[str]:\n if self.deploymentIds:\n return self.deploymentIds[0]\n\n\n@convertibleclass\nclass ResendInvitationsRequestObject(BaseAuthRequestObject):\n INVITATION_CODE = \"invitationCode\"\n EMAIL = \"email\"\n\n email: str = required_field(metadata=meta(validate_email))\n invitationCode: str = required_field()\n\n\n@convertibleclass\nclass ResendInvitationsListRequestObject(BaseAuthRequestObject):\n INVITATIONS_LIST = \"invitationsList\"\n\n @convertibleclass\n class InvitationItem:\n INVITATION_CODE = \"invitationCode\"\n EMAIL = \"email\"\n\n email: str = required_field(metadata=meta(validate_email))\n invitationCode: str = required_field(metadata=meta(not_empty))\n\n invitationsList: list[InvitationItem] = required_field(metadata=meta(not_empty))\n\n\n@convertibleclass\nclass GetInvitationLinkRequestObject(BaseAuthRequestObject):\n DEPLOYMENT_ID = \"deploymentId\"\n ROLE_ID = 
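The validate/check_permission pair above encodes who may invite whom; a small illustrative check against the INVITATION_PERMISSIONS_PER_ROLE table defined at the top of this module (assumed semantics: a submitter's role id maps to the role ids it may send invitations for; this only runs inside this codebase):

allowed = INVITATION_PERMISSIONS_PER_ROLE.get(RoleName.CLINICIAN, [])
assert RoleName.USER in allowed and RoleName.PROXY in allowed  # clinicians may invite users and proxies
assert RoleName.ADMINISTRATOR not in allowed                   # but not administrators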
\"roleId\"\n EXPIRES_IN = \"expiresIn\"\n RETRIEVE_SHORTENED = \"retrieveShortened\"\n SENDER_ID = \"senderId\"\n\n deploymentId: str = required_field()\n roleId: str = required_field()\n expiresIn: str = field(default=\"P1W\", metadata=meta(parse_duration))\n retrieveShortened: bool = field(default=False)\n senderId: str = required_field(metadata=meta(validate_object_id))\n\n\n@convertibleclass\nclass DeleteInvitationRequestObject(RequestObject):\n INVITATION_ID = \"invitationId\"\n\n invitationId: str = required_field(metadata=meta(validate_object_id))\n\n\n@convertibleclass\nclass DeleteInvitationsListRequestObject(RequestObject):\n INVITATION_ID_LIST = \"invitationIdList\"\n INVITATION_TYPE = \"invitationType\"\n\n invitationIdList: list[str] = required_field(\n metadata=meta(lambda x: all(map(validate_object_id, x)))\n )\n invitationType: InvitationType = field(default=InvitationType.PERSONAL)\n\n\n@convertibleclass\nclass RetrieveInvitationsRequestObject(BaseAuthRequestObject):\n EMAIL = \"email\"\n ROLE_TYPE = \"roleType\"\n SKIP = \"skip\"\n LIMIT = \"limit\"\n SUBMITTER = \"submitter\"\n INVITATION_TYPE = \"invitationType\"\n SORT_FIELDS = \"sortFields\"\n\n class RoleType(Enum):\n MANAGER = \"Manager\"\n USER = \"User\"\n\n email: str = default_field()\n roleType: RoleType = required_field()\n skip: int = positive_integer_field(default=None, metadata=meta(required=True))\n limit: int = positive_integer_field(default=None, metadata=meta(required=True))\n submitter: AuthorizedUser = required_field()\n invitationType: InvitationType = default_field()\n sortFields: list[SortField] = default_field()\n\n def post_init(self):\n if not self.sortFields:\n self.sortFields = [\n SortField.from_dict(\n {\n SortField.FIELD: Invitation.CREATE_DATE_TIME,\n SortField.DIRECTION: SortField.Direction.DESC.value,\n }\n )\n ]\n\n\n@convertibleclass\nclass InvitationValidityRequestObject(RequestObject):\n INVITATION_CODE = \"invitationCode\"\n\n invitationCode: str = required_field()\n", "repo_name": "meenu-gupta/huma-server-sdk", "sub_path": "extensions/authorization/router/invitation_request_objects.py", "file_name": "invitation_request_objects.py", "file_ext": "py", "file_size_in_byte": 10478, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "extensions.authorization.models.role.role.RoleName.ADMINISTRATOR", "line_number": 45, "usage_type": "attribute"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 45, "usage_type": "name"}, {"api_name": "extensions.authorization.models.role.role.RoleName.CLINICIAN", "line_number": 52, "usage_type": "attribute"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 52, "usage_type": "name"}, {"api_name": "extensions.authorization.models.role.role.RoleName.ADMINISTRATOR", "line_number": 46, "usage_type": "attribute"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 46, "usage_type": "name"}, {"api_name": "extensions.authorization.models.role.role.RoleName.CLINICIAN", "line_number": 47, "usage_type": "attribute"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 47, "usage_type": "name"}, {"api_name": "extensions.authorization.models.role.role.RoleName.SUPERVISOR", "line_number": 48, "usage_type": "attribute"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 48, "usage_type": "name"}, {"api_name": 
"extensions.authorization.models.role.role.RoleName.SUPPORT", "line_number": 49, "usage_type": "attribute"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 49, "usage_type": "name"}, {"api_name": "extensions.authorization.models.role.role.RoleName.USER", "line_number": 50, "usage_type": "attribute"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 50, "usage_type": "name"}, {"api_name": "extensions.authorization.models.role.role.RoleName.USER", "line_number": 52, "usage_type": "attribute"}, {"api_name": "extensions.authorization.models.role.role.RoleName.PROXY", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sdk.common.utils.validators.validate_id", "line_number": 59, "usage_type": "call"}, {"api_name": "extensions.authorization.models.role.default_roles.DefaultRoles", "line_number": 65, "usage_type": "name"}, {"api_name": "sdk.common.utils.inject.autoparams", "line_number": 64, "usage_type": "call"}, {"api_name": "sdk.common.usecase.request_object.RequestObject", "line_number": 70, "usage_type": "name"}, {"api_name": "extensions.authorization.models.invitation.Invitation", "line_number": 77, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 77, "usage_type": "call"}, {"api_name": "sdk.phoenix.config.server_config.Client", "line_number": 78, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 78, "usage_type": "call"}, {"api_name": "extensions.authorization.models.authorized_user.AuthorizedUser", "line_number": 79, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 79, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 80, "usage_type": "call"}, {"api_name": "sdk.common.localization.utils.Language.EN", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sdk.common.localization.utils.Language", "line_number": 81, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 81, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.incorrect_language_to_default", "line_number": 81, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.default_field", "line_number": 83, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.convertibleclass", "line_number": 69, "usage_type": "name"}, {"api_name": "sdk.auth.use_case.auth_request_objects.BaseAuthRequestObject", "line_number": 87, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 96, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 96, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.validate_email_list", "line_number": 96, "usage_type": "argument"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 97, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 97, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.default_field", "line_number": 98, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 98, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.validate_object_id", "line_number": 98, "usage_type": "argument"}, {"api_name": "sdk.common.utils.convertible.default_field", "line_number": 99, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 99, "usage_type": "call"}, 
{"api_name": "sdk.common.utils.validators.validate_object_ids", "line_number": 99, "usage_type": "argument"}, {"api_name": "sdk.common.utils.convertible.default_field", "line_number": 100, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 100, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.validate_object_id", "line_number": 100, "usage_type": "argument"}, {"api_name": "dataclasses.field", "line_number": 101, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 101, "usage_type": "call"}, {"api_name": "isodate.parse_duration", "line_number": 101, "usage_type": "argument"}, {"api_name": "extensions.authorization.models.authorized_user.AuthorizedUser", "line_number": 102, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 102, "usage_type": "call"}, {"api_name": "extensions.authorization.validators.check_role_id_valid_for_organization", "line_number": 107, "usage_type": "call"}, {"api_name": "extensions.authorization.models.role.role.RoleName.PROXY", "line_number": 110, "usage_type": "attribute"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 110, "usage_type": "name"}, {"api_name": "extensions.authorization.validators.is_common_role", "line_number": 111, "usage_type": "name"}, {"api_name": "extensions.authorization.models.role.role.RoleName.common_roles", "line_number": 111, "usage_type": "call"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 111, "usage_type": "name"}, {"api_name": "extensions.authorization.validators.is_common_role", "line_number": 112, "usage_type": "name"}, {"api_name": "sdk.common.utils.validators.must_be_at_least_one_of", "line_number": 113, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.must_not_be_present", "line_number": 117, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.must_be_present", "line_number": 120, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.must_be_present", "line_number": 122, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.must_not_be_present", "line_number": 123, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.must_not_be_present", "line_number": 124, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.must_be_present", "line_number": 126, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.must_not_be_present", "line_number": 127, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.must_not_be_present", "line_number": 128, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.must_not_be_present", "line_number": 130, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.must_be_present", "line_number": 131, "usage_type": "call"}, {"api_name": "sdk.common.exceptions.exceptions.InvalidRequestException", "line_number": 134, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.must_be_present", "line_number": 141, "usage_type": "call"}, {"api_name": "sdk.common.exceptions.exceptions.InvalidRequestException", "line_number": 145, "usage_type": "call"}, {"api_name": "extensions.authorization.models.authorized_user.AuthorizedUser", "line_number": 147, "usage_type": "name"}, {"api_name": "extensions.authorization.validators.is_common_role", "line_number": 153, "usage_type": "call"}, {"api_name": "extensions.authorization.models.role.role.RoleName.PROXY", "line_number": 154, "usage_type": 
"attribute"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 154, "usage_type": "name"}, {"api_name": "sdk.common.exceptions.exceptions.PermissionDenied", "line_number": 157, "usage_type": "name"}, {"api_name": "bson.ObjectId.is_valid", "line_number": 173, "usage_type": "call"}, {"api_name": "bson.ObjectId", "line_number": 173, "usage_type": "name"}, {"api_name": "sdk.common.exceptions.exceptions.PermissionDenied", "line_number": 176, "usage_type": "name"}, {"api_name": "extensions.authorization.models.authorized_user.AuthorizedUser", "line_number": 180, "usage_type": "name"}, {"api_name": "sdk.common.exceptions.exceptions.PermissionDenied", "line_number": 190, "usage_type": "name"}, {"api_name": "sdk.common.utils.inject.instance", "line_number": 194, "usage_type": "call"}, {"api_name": "extensions.authorization.models.role.default_roles.DefaultRoles", "line_number": 194, "usage_type": "argument"}, {"api_name": "sdk.common.utils.inject", "line_number": 194, "usage_type": "name"}, {"api_name": "extensions.authorization.models.role.role.RoleName.org_roles", "line_number": 195, "usage_type": "call"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 195, "usage_type": "name"}, {"api_name": "extensions.authorization.models.role.role.RoleName.common_roles", "line_number": 196, "usage_type": "call"}, {"api_name": "extensions.authorization.models.role.role.RoleName", "line_number": 196, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 200, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.convertibleclass", "line_number": 86, "usage_type": "name"}, {"api_name": "sdk.auth.use_case.auth_request_objects.BaseAuthRequestObject", "line_number": 206, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 210, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 210, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.validate_email", "line_number": 210, "usage_type": "argument"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 211, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.convertibleclass", "line_number": 205, "usage_type": "name"}, {"api_name": "sdk.auth.use_case.auth_request_objects.BaseAuthRequestObject", "line_number": 215, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 223, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 223, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.validate_email", "line_number": 223, "usage_type": "argument"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 224, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 224, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.not_empty", "line_number": 224, "usage_type": "argument"}, {"api_name": "sdk.common.utils.convertible.convertibleclass", "line_number": 218, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 226, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 226, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.not_empty", "line_number": 226, "usage_type": "argument"}, {"api_name": "sdk.common.utils.convertible.convertibleclass", "line_number": 214, "usage_type": "name"}, {"api_name": 
"sdk.auth.use_case.auth_request_objects.BaseAuthRequestObject", "line_number": 230, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 237, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 238, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 239, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 239, "usage_type": "call"}, {"api_name": "isodate.parse_duration", "line_number": 239, "usage_type": "argument"}, {"api_name": "dataclasses.field", "line_number": 240, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 241, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 241, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.validate_object_id", "line_number": 241, "usage_type": "argument"}, {"api_name": "sdk.common.utils.convertible.convertibleclass", "line_number": 229, "usage_type": "name"}, {"api_name": "sdk.common.usecase.request_object.RequestObject", "line_number": 245, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 248, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 248, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.validate_object_id", "line_number": 248, "usage_type": "argument"}, {"api_name": "sdk.common.utils.convertible.convertibleclass", "line_number": 244, "usage_type": "name"}, {"api_name": "sdk.common.usecase.request_object.RequestObject", "line_number": 252, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 256, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 257, "usage_type": "call"}, {"api_name": "sdk.common.utils.validators.validate_object_id", "line_number": 257, "usage_type": "argument"}, {"api_name": "extensions.authorization.models.invitation.InvitationType", "line_number": 259, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 259, "usage_type": "call"}, {"api_name": "extensions.authorization.models.invitation.InvitationType.PERSONAL", "line_number": 259, "usage_type": "attribute"}, {"api_name": "sdk.common.utils.convertible.convertibleclass", "line_number": 251, "usage_type": "name"}, {"api_name": "sdk.auth.use_case.auth_request_objects.BaseAuthRequestObject", "line_number": 263, "usage_type": "name"}, {"api_name": "aenum.Enum", "line_number": 272, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.default_field", "line_number": 276, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 277, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.positive_integer_field", "line_number": 278, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 278, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.positive_integer_field", "line_number": 279, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.meta", "line_number": 279, "usage_type": "call"}, {"api_name": "extensions.authorization.models.authorized_user.AuthorizedUser", "line_number": 280, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 280, "usage_type": "call"}, {"api_name": "extensions.authorization.models.invitation.InvitationType", "line_number": 281, "usage_type": 
"name"}, {"api_name": "sdk.common.utils.convertible.default_field", "line_number": 281, "usage_type": "call"}, {"api_name": "extensions.common.sort.SortField", "line_number": 282, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.default_field", "line_number": 282, "usage_type": "call"}, {"api_name": "extensions.common.sort.SortField.from_dict", "line_number": 287, "usage_type": "call"}, {"api_name": "extensions.common.sort.SortField", "line_number": 287, "usage_type": "name"}, {"api_name": "extensions.common.sort.SortField.FIELD", "line_number": 289, "usage_type": "attribute"}, {"api_name": "extensions.common.sort.SortField", "line_number": 289, "usage_type": "name"}, {"api_name": "extensions.common.sort.SortField.DIRECTION", "line_number": 290, "usage_type": "attribute"}, {"api_name": "extensions.common.sort.SortField", "line_number": 290, "usage_type": "name"}, {"api_name": "extensions.authorization.models.invitation.Invitation.CREATE_DATE_TIME", "line_number": 289, "usage_type": "attribute"}, {"api_name": "extensions.authorization.models.invitation.Invitation", "line_number": 289, "usage_type": "name"}, {"api_name": "extensions.common.sort.SortField.Direction", "line_number": 290, "usage_type": "attribute"}, {"api_name": "sdk.common.utils.convertible.convertibleclass", "line_number": 262, "usage_type": "name"}, {"api_name": "sdk.common.usecase.request_object.RequestObject", "line_number": 297, "usage_type": "name"}, {"api_name": "sdk.common.utils.convertible.required_field", "line_number": 300, "usage_type": "call"}, {"api_name": "sdk.common.utils.convertible.convertibleclass", "line_number": 296, "usage_type": "name"}]} {"seq_id": "73226820938", "text": "# -*- coding:utf-8 -*-\nimport requests, os\nfrom flask import render_template, request, flash, redirect, url_for, send_from_directory, send_file\nfrom flask_login import current_user, login_required\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom pandas import DataFrame\nfrom pydrive.auth import GoogleAuth\nfrom reportlab.lib import styles\nfrom reportlab.lib.enums import TA_RIGHT, TA_CENTER\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.lib.utils import ImageReader\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph, Image, TableStyle, Table, Spacer\nfrom sqlalchemy import cast, Date\nfrom werkzeug.utils import secure_filename\nfrom . 
import purchase_tracker_bp as purchase_tracker\nfrom .forms import *\nfrom datetime import datetime\nfrom pytz import timezone\nfrom pydrive.drive import GoogleDrive\nfrom .models import PurchaseTrackerAccount, PurchaseTrackerForm\nfrom flask_mail import Message\nfrom ..main import mail\nfrom ..roles import finance_procurement_permission\n\n# Upload images for Google Drive\n\n\nFOLDER_ID = \"1JYkU2kRvbvGnmpQ1Tb-TcQS-vWQKbXvy\"\n\njson_keyfile = requests.get(os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')).json()\n\nbangkok = timezone('Asia/Bangkok')\n\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\n\n@purchase_tracker.route('/official/')\n@login_required\ndef landing_page():\n    return render_template('purchase_tracker/first_page.html')\n\n\n@purchase_tracker.route('/personnel/personnel_index')\ndef staff_index():\n    return render_template('purchase_tracker/personnel/personnel_index.html')\n\n\n@purchase_tracker.route('/personnel/personnel_index/e-form/method/select/<int:account_id>')\ndef select_form(account_id):\n    account = PurchaseTrackerAccount.query.get(account_id)\n    return render_template('purchase_tracker/personnel/alternative_form.html', account=account)\n\n\n@purchase_tracker.route('/main')\ndef index():\n    return render_template('purchase_tracker/index.html')\n\n\n@purchase_tracker.route('/account/create', methods=['GET', 'POST'])\n@login_required\ndef add_account():\n    form = CreateAccountForm()\n    if request.method == 'POST':\n        filename = ''\n        account = PurchaseTrackerAccount()\n        form.populate_obj(account)\n        account.creation_date = bangkok.localize(datetime.now())\n        account.staff = current_user\n        drive = initialize_gdrive()\n        if form.upload.data:\n            if not filename or (form.upload.data.filename != filename):\n                upfile = form.upload.data\n                filename = secure_filename(upfile.filename)\n                upfile.save(filename)\n            file_drive = drive.CreateFile({'title': filename,\n                                           'parents': [{'id': FOLDER_ID, \"kind\": \"drive#fileLink\"}]})\n            file_drive.SetContentFile(filename)\n            try:\n                file_drive.Upload()\n                permission = file_drive.InsertPermission({'type': 'anyone',\n                                                          'value': 'anyone',\n                                                          'role': 'reader'})\n            except:\n                flash('Failed to upload the attached file to the Google drive.', 'danger')\n            else:\n                flash('The attached file has been uploaded to the Google drive', 'success')\n                account.url = file_drive['id']\n\n        db.session.add(account)\n        db.session.commit()\n        flash(u'บันทึกข้อมูลสำเร็จ.', 'success')\n        return render_template('purchase_tracker/personnel/personnel_index.html')\n    # Check Error\n    else:\n        for er in form.errors:\n            flash(er, 'danger')\n    return render_template('purchase_tracker/create_account.html', form=form)\n\n\ndef initialize_gdrive():\n    gauth = GoogleAuth()\n    scopes = ['https://www.googleapis.com/auth/drive']\n    gauth.credentials = ServiceAccountCredentials.from_json_keyfile_dict(json_keyfile, scopes)\n    return GoogleDrive(gauth)\n\n\n@purchase_tracker.route('/track/')\n@purchase_tracker.route('/track/<int:account_id>', methods=['GET'])\ndef track(account_id=None):\n    list_type = request.args.get('list_type')\n    if list_type == \"myAccount\" or list_type is None:\n        accounts = PurchaseTrackerAccount.query.filter_by(staff_id=current_user.id).all()\n    elif list_type == \"ourAccount\":\n        org = current_user.personal_info.org\n        accounts = [account for account in PurchaseTrackerAccount.query.all()\n                    if account.staff.personal_info.org == org]\n    return render_template('purchase_tracker/tracking.html',\n                           account_id=account_id, accounts=accounts, list_type=list_type)\n\n\n@purchase_tracker.route('/track/<int:account_id>/view')\ndef 
view_info_track(account_id=None):\n    from sqlalchemy import desc\n    if account_id:\n        account = PurchaseTrackerAccount.query.get(account_id)\n        # better make use of the relationship!\n        activities = [a.to_list() for a in account.records.all()]\n    else:\n        flash(u'ข้อมูลจะปรากฎเมื่อหน่วยงานคลังและพัสดุอัพเดตเรียบร้อย', 'warning')\n        activities = []\n    # activities = [a.to_list() for a in PurchaseTrackerStatus.query.filter_by(account_id=account_id)\n    #     .order_by(PurchaseTrackerStatus.start_date)]\n    if not activities:\n        default_date = datetime.now().isoformat()\n    else:\n        default_date = activities[-1][3]\n    return render_template('purchase_tracker/view_info_track.html',\n                           account_id=account_id, account=account, desc=desc,\n                           PurchaseTrackerStatus=PurchaseTrackerStatus,\n                           activities=activities, default_date=default_date)\n\n\n@purchase_tracker.route('/account/<int:account_id>/edit', methods=['GET', 'POST'])\n@login_required\ndef edit_account(account_id):\n    account = PurchaseTrackerAccount.query.get(account_id)\n    form = CreateAccountForm(obj=account)\n    if request.method == 'POST':\n        filename = ''\n        form.populate_obj(account)\n        account.creation_date = bangkok.localize(datetime.now())\n        account.staff = current_user\n        drive = initialize_gdrive()\n        if form.upload.data:\n            if not filename or (form.upload.data.filename != filename):\n                upfile = form.upload.data\n                filename = secure_filename(upfile.filename)\n                upfile.save(filename)\n            file_drive = drive.CreateFile({'title': filename,\n                                           'parents': [{'id': FOLDER_ID, \"kind\": \"drive#fileLink\"}]})\n            file_drive.SetContentFile(filename)\n            try:\n                file_drive.Upload()\n                permission = file_drive.InsertPermission({'type': 'anyone',\n                                                          'value': 'anyone',\n                                                          'role': 'reader'})\n            except:\n                flash('Failed to upload the attached file to the Google drive.', 'danger')\n            else:\n                flash('The attached file has been uploaded to the Google drive', 'success')\n                account.url = file_drive['id']\n\n        db.session.add(account)\n        db.session.commit()\n        flash(u'บันทึกข้อมูลสำเร็จ.', 'success')\n        return redirect(url_for('purchase_tracker.track'))\n    # Check Error\n    else:\n        for er in form.errors:\n            flash(er, 'danger')\n    return render_template('purchase_tracker/edit_account.html', form=form, account_id=account_id)\n\n\n@purchase_tracker.route('/accounts/<int:account_id>/cancel', methods=['GET'])\n@login_required\ndef cancel_account(account_id):\n    account = PurchaseTrackerAccount.query.get(account_id)\n    if not account.cancelled_datetime:\n        account.cancelled_datetime = datetime.now(tz=bangkok)\n        account.cancelled_by = current_user\n        db.session.add(account)\n        db.session.commit()\n        flash(u'ยกเลิกบัญชีเรียบร้อยแล้ว', 'success')\n    else:\n        flash(u'บัญชีนี้ถูกยุติการดำเนินการแล้ว', 'warning')\n    next = request.args.get('next')\n    if next:\n        return redirect(next)\n    return redirect(url_for('purchase_tracker.track', account_id=account_id))\n\n\n@purchase_tracker.route('/accounts/<int:account_id>/close', methods=['GET'])\n@login_required\ndef close_account(account_id):\n    account = PurchaseTrackerAccount.query.get(account_id)\n    if not account.end_datetime:\n        account.end_datetime = datetime.now(tz=bangkok)\n        db.session.add(account)\n        db.session.commit()\n        flash(u'ปิดบัญชีเรียบร้อยแล้ว', 'success')\n    else:\n        flash(u'บัญชีนี้ถูกปิดการดำเนินการแล้ว', 'warning')\n    return redirect(url_for('purchase_tracker.update_status', account_id=account_id))\n\n\n@purchase_tracker.route('/supplies/')\n@finance_procurement_permission.require()\ndef supplies():\n    from sqlalchemy import desc\n    accounts = PurchaseTrackerAccount.query.all()\n    return 
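Both add_account and edit_account above repeat the same PyDrive upload steps; isolated, the pattern looks like this sketch (the local file name is hypothetical, while initialize_gdrive and FOLDER_ID come from this module):

drive = initialize_gdrive()
file_drive = drive.CreateFile({'title': 'quotation.pdf',
                               'parents': [{'id': FOLDER_ID, 'kind': 'drive#fileLink'}]})
file_drive.SetContentFile('quotation.pdf')  # local file to push
file_drive.Upload()
file_drive.InsertPermission({'type': 'anyone', 'value': 'anyone', 'role': 'reader'})
print(file_drive['id'])  # the id the handlers store on account.url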
render_template('purchase_tracker/procedure_supplies.html',\n accounts=accounts,\n desc=desc,\n PurchaseTrackerStatus=PurchaseTrackerStatus)\n\n\n@purchase_tracker.route('/description')\ndef description():\n return render_template('purchase_tracker/description.html')\n\n\n@purchase_tracker.route('/contact')\ndef contact():\n return render_template('purchase_tracker/contact_us.html')\n\n\ndef send_mail(recp, title, message):\n message = Message(subject=title, body=message, recipients=recp)\n mail.send(message)\n\n\n@purchase_tracker.route('/account//update', methods=['GET', 'POST'])\n@finance_procurement_permission.require()\n@login_required\ndef update_status(account_id):\n form = StatusForm()\n account = PurchaseTrackerAccount.query.get(account_id)\n if request.method == 'POST':\n if form.validate_on_submit():\n status = PurchaseTrackerStatus()\n form.populate_obj(status)\n status.account_id = account_id\n status.status_date = bangkok.localize(datetime.now())\n status.creation_date = bangkok.localize(datetime.now())\n status.cancel_datetime = bangkok.localize(datetime.now())\n status.update_datetime = bangkok.localize(datetime.now())\n status.staff = current_user\n if not form.other_activity.data and not form.activity.data:\n flash(u'กรุณาเลือกหัวข้อกิจกรรมหรือใส่กิจกรรมอื่นๆ.', 'danger')\n return redirect(\n url_for('purchase_tracker.update_status', account_id=account_id, form=form, account=account))\n db.session.add(status)\n db.session.commit()\n title = u'แจ้งเตือนการปรับเปลี่ยนสถานะการจัดซื้อพัสดุและครุภัณฑ์หมายเลข {}'.format(status.account.number)\n message = u'เรียน {}\\n\\nสถานะการจัดซื้อพัสดุและครุภัณฑ์หมายเลข {} คือ {}' \\\n .format(current_user.personal_info.fullname, status.account.number,\n status.other_activity or status.activity.activity)\n message += u'\\n\\n======================================================'\n message += u'\\nอีเมลนี้ส่งโดยระบบอัตโนมัติ กรุณาอย่าตอบกลับ ' \\\n u'หากมีปัญหาใดๆเกี่ยวกับเว็บไซต์กรุณาติดต่อหน่วยข้อมูลและสารสนเทศ '\n message += u'\\nThis email was sent by an automated system. Please do not reply.' 
\\\n u' If you have any problem about website, please contact the IT unit.'\n send_mail([u'{}@mahidol.ac.th'.format(account.staff.email)], title, message)\n flash(u'อัพเดตข้อมูลเรียบร้อย', 'success')\n form.activity.data = \"\"\n form.other_activity.data = \"\"\n form.comment.data = \"\"\n # Check Error\n else:\n flash(form.errors, 'danger')\n\n activities = [a.to_list() for a in PurchaseTrackerStatus.query.filter_by(account_id=account_id)\n .order_by(PurchaseTrackerStatus.start_date)]\n if not activities:\n default_date = datetime.now().isoformat()\n else:\n default_date = activities[-1][3]\n return render_template('purchase_tracker/update_record.html',\n account_id=account_id, form=form, activities=activities, account=account,\n default_date=default_date)\n\n\n@purchase_tracker.route('/account//status//edit', methods=['GET', 'POST'])\n@finance_procurement_permission.require()\n@login_required\ndef edit_update_status(account_id, status_id):\n status = PurchaseTrackerStatus.query.get(status_id)\n form = StatusForm(obj=status)\n if request.method == 'POST':\n if form.validate_on_submit():\n form.populate_obj(status)\n status.account_id = account_id\n status.status_date = bangkok.localize(datetime.now())\n status.creation_date = bangkok.localize(datetime.now())\n status.cancel_datetime = bangkok.localize(datetime.now())\n status.update_datetime = bangkok.localize(datetime.now())\n status.staff = current_user\n db.session.add(status)\n db.session.commit()\n title = u'แจ้งเตือนการแก้ไขปรับเปลี่ยนสถานะการจัดซื้อพัสดุและครุภัณฑ์หมายเลข {}'.format(\n status.account.number)\n message = u'เรียน {}\\n\\nสถานะการจัดซื้อพัสดุและครุภัณฑ์หมายเลข {} คือ {}' \\\n .format(current_user.personal_info.fullname, status.account.number,\n status.other_activity or status.activity.activity)\n message += u'\\n\\n======================================================'\n message += u'\\nอีเมลนี้ส่งโดยระบบอัตโนมัติ กรุณาอย่าตอบกลับ ' \\\n u'หากมีปัญหาใดๆเกี่ยวกับเว็บไซต์กรุณาติดต่อหน่วยข้อมูลและสารสนเทศ '\n message += u'\\nThis email was sent by an automated system. Please do not reply.' 
\\\n u' If you have any problem about website, please contact the IT unit.'\n send_mail([u'{}@mahidol.ac.th'.format(status.account.staff.email)], title, message)\n flash(u'แก้ไขข้อมูลเรียบร้อย', 'success')\n return redirect(url_for('purchase_tracker.update_status', status_id=status.id, account_id=account_id))\n return render_template('purchase_tracker/edit_update_record.html',\n account_id=account_id, form=form)\n\n\n@purchase_tracker.route('/account//status//delete')\n@finance_procurement_permission.require()\n@login_required\ndef delete_update_status(account_id, status_id):\n if account_id:\n status = PurchaseTrackerStatus.query.get(status_id)\n flash(u'The update status has been removed.')\n db.session.delete(status)\n db.session.commit()\n return redirect(url_for('purchase_tracker.update_status', account_id=account_id))\n\n\n@purchase_tracker.route('/create//activity', methods=['GET', 'POST'])\n@finance_procurement_permission.require()\n@login_required\ndef add_activity(account_id):\n activity = db.session.query(PurchaseTrackerActivity)\n form = CreateActivityForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n new_activity = PurchaseTrackerActivity()\n form.populate_obj(new_activity)\n db.session.add(new_activity)\n db.session.commit()\n flash(u'บันทึกการเพิ่มกิจกรรมใหม่สำเร็จ.', 'success')\n return redirect(url_for('purchase_tracker.update_status', account_id=account_id))\n # Check Error\n else:\n for er in form.errors:\n flash(er, 'danger')\n return render_template('purchase_tracker/create_activity.html', form=form, activity=activity, account_id=account_id)\n\n\n@purchase_tracker.route('/dashboard', methods=['GET', 'POST'])\ndef show_info_page():\n start_date = None\n end_date = None\n account_query = PurchaseTrackerAccount.query.all()\n form = ReportDateForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n start_date = datetime.strptime(form.start_date.data, '%d-%m-%Y')\n end_date = datetime.strptime(form.end_date.data, '%d-%m-%Y')\n account_query = PurchaseTrackerAccount.query.filter(\n cast(PurchaseTrackerAccount.booking_date, Date) >= start_date) \\\n .filter(cast(PurchaseTrackerAccount.booking_date, Date) <= end_date)\n else:\n flash(form.errors, 'danger')\n return render_template('purchase_tracker/info_page.html', account_query=account_query, form=form,\n start_date=start_date, end_date=end_date)\n\n\n@purchase_tracker.route('/dashboard/info/download', methods=['GET'])\ndef dashboard_info_download():\n records = []\n start_date = request.args.get('start_date')\n end_date = request.args.get('end_date')\n if start_date and end_date:\n accounts = PurchaseTrackerAccount.query.filter(cast(PurchaseTrackerAccount.booking_date, Date) >= start_date) \\\n .filter(cast(PurchaseTrackerAccount.booking_date, Date) <= end_date)\n else:\n accounts = PurchaseTrackerAccount.query.all()\n\n for account in accounts:\n for record in account.records:\n records.append({\n u'เลขที่หนังสือ': u\"{}\".format(account.number),\n u'วันที่หนังสือ': u\"{}\".format(account.booking_date),\n u'ชื่อ': u\"{}\".format(account.subject),\n u'วงเงินหลักการ': u\"{:,.2f}\".format(account.amount),\n u'รูปแบบหลักการ': u\"{}\".format(account.formats),\n u'ผู้สร้าง account โดย': u\"{}\".format(account.staff.personal_info.fullname),\n u'หน่วยงาน/ภาควิชา': u\"{}\".format(account.staff.personal_info.org.name),\n u'กิจกรรม': u\"{}\".format(record.other_activity or record.activity.activity),\n u'ผู้รับผิดชอบ': u\"{}\".format(record.staff.personal_info.fullname),\n u'วันเริ่มกิจกรรม': 
u\"{}\".format(record.start_date),\n u'วันสิ้นสุดกิจกรรม': u\"{}\".format(record.end_date),\n u'หมายเหตุเพิ่มเติม': u\"{}\".format(record.comment),\n u'เวลาดำเนินกิจกรรม': u\"{}\".format(record.weekdays),\n })\n df = DataFrame(records)\n df.to_excel('account_summary.xlsx')\n return send_file(os.path.join(os.getcwd(), 'account_summary.xlsx'))\n\n\n# @purchase_tracker.route('/personnel/personnel_index/e-form/create//', methods=['GET', 'POST'])\n# @login_required\n# def create_form(account_id, form_code):\n# account = PurchaseTrackerAccount.query.get(account_id)\n# MTPCform = create_MTPCForm(acnt=account)\n# form = MTPCform()\n# if form.validate_on_submit():\n# new_form = PurchaseTrackerForm()\n# form.populate_obj(new_form)\n# new_form.staff = current_user\n# db.session.add(new_form)\n# db.session.commit()\n# flash(u'บันทึกข้อมูลสำเร็จ.', 'success')\n# form_letter(new_form, account)\n# return send_file('e-form.pdf')\n# # Check Error\n# else:\n# for er in form.errors:\n# flash(\"{}:{}\".format(er,form.errors[er]), 'danger')\n# return render_template('purchase_tracker/personnel/create_form_{}.html'.format(form_code), form=form, account=account)\n#\n#\n# sarabun_font = TTFont('Sarabun', 'app/static/fonts/THSarabunNew.ttf')\n# pdfmetrics.registerFont(sarabun_font)\n# style_sheet = getSampleStyleSheet()\n# style_sheet.add(ParagraphStyle(name='ThaiStyle', fontName='Sarabun'))\n# style_sheet.add(ParagraphStyle(name='ThaiStyleNumber', fontName='Sarabun', alignment=TA_RIGHT))\n# style_sheet.add(ParagraphStyle(name='ThaiStyleCenter', fontName='Sarabun', alignment=TA_CENTER))\n#\n#\n# def form_letter(form, account):\n# logo = Image('app/static/img/logo-MU.jpg', 60, 60)\n#\n# def all_page_setup(canvas, doc):\n# canvas.saveState()\n# logo_image = ImageReader('app/static/img/logo-MU.jpg')\n# canvas.drawImage(logo_image, 10, 700, width=70, height=70)\n# canvas.restoreState()\n#\n# doc = SimpleDocTemplate(\"app/e-form.pdf\",\n# pagesize=letter,\n# rightMargin=72,\n# leftMargin=72,\n# topMargin=72,\n# bottomMargin=18)\n#\n#\n# data = [ Paragraph(u'ภาควิชา / ศูนย์ {}'.format(account.staff.personal_info.org.name), style=style_sheet['ThaiStyle']),\n# Paragraph(u'ที่ {}'.format(form.account.number), style=style_sheet['ThaiStyle']),\n# Paragraph(u'วันที่ {}'.format(form.account.creation_date), style=style_sheet['ThaiStyle']),\n# Paragraph(u'เรื่อง {}'.format(form.account.subject), style=style_sheet['ThaiStyle']),\n# Paragraph(u'ข้าพเจ้า {}'.format(form.name), style=style_sheet['ThaiStyle']),\n# Paragraph(u'เหตุผลและความจำเป็นเร่งด่วนที่ต้องซื้อหรือจ้าง {}'.format(form.reason),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'รายละเอียดของพัสดุที่ซื้อหรือจ้าง {}'.format(form.account.desc),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'วงเงินที่ซื้อหรือจ้างในครั้งนี้เป็นเงินเท่าไหร่ {}'.format(form.account.amount),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'จาก {}'.format(form.account.amount),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'ตามใบส่งของ/ใบเสร็จรับเงินเล่มที่ {}'.format(form.book),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'เลขที่ {}'.format(form.number),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'วันที่ {}'.format(form.receipt_date),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'โดยขอเบิกจ่ายจากเงิน {}'.format(form.disbursement_method),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'ประจำปีงบประมาณ {}'.format(form.financial_year),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'วันที่ {}'.format(form.receipt_date),\n# style=style_sheet['ThaiStyle']),\n# 
Paragraph(u'รหัสศูนย์ต้นทุน {}'.format(form.cost_center),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'รหัสใบสั่งงานภายใน {}'.format(form.internal_order),\n# style=style_sheet['ThaiStyle']),\n# ]\n# data.append(Spacer(1, 12))\n#\n# doc.build(data, onLaterPages=all_page_setup, onFirstPage=all_page_setup)\n\n", "repo_name": "MUMT-IT/mis2018", "sub_path": "app/purchase_tracker/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 25624, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "46", "api": [{"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 33, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 43, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 48, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query.get", "line_number": 53, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 71, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 100, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 63, "usage_type": "name"}, {"api_name": "pydrive.auth.GoogleAuth", "line_number": 104, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_dict", "line_number": 106, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials", "line_number": 106, "usage_type": "name"}, {"api_name": "pydrive.drive.GoogleDrive", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 113, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 113, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount.query.filter_by", "line_number": 115, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 115, "usage_type": "attribute"}, {"api_name": 
"models.PurchaseTrackerAccount", "line_number": 115, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 115, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 115, "usage_type": "name"}, {"api_name": "flask_login.current_user.personal_info", "line_number": 117, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 117, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount.query.all", "line_number": 118, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 118, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 118, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 120, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query.get", "line_number": 128, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 128, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 128, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 137, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 140, "usage_type": "call"}, {"api_name": "sqlalchemy.desc", "line_number": 141, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount.query.get", "line_number": 149, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 149, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 151, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 151, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 154, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 154, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 155, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 160, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 171, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 173, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 178, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 179, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 179, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 183, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 184, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 147, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount.query.get", "line_number": 190, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 190, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 190, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 192, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 192, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 193, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 196, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 198, "usage_type": "call"}, {"api_name": 
"flask.request.args.get", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 199, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 199, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 201, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 202, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 202, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 188, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount.query.get", "line_number": 208, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 208, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 208, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 210, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 210, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 215, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 216, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 216, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 206, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount.query.all", "line_number": 223, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 223, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 223, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 224, "usage_type": "call"}, {"api_name": "sqlalchemy.desc", "line_number": 226, "usage_type": "name"}, {"api_name": "roles.finance_procurement_permission.require", "line_number": 220, "usage_type": "call"}, {"api_name": "roles.finance_procurement_permission", "line_number": 220, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 232, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 237, "usage_type": "call"}, {"api_name": "flask_mail.Message", "line_number": 241, "usage_type": "call"}, {"api_name": "main.mail.send", "line_number": 242, "usage_type": "call"}, {"api_name": "main.mail", "line_number": 242, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount.query.get", "line_number": 250, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 250, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 250, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 251, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 251, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 256, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 256, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 257, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 257, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 258, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 258, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 259, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 259, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 260, "usage_type": "name"}, {"api_name": 
"flask.flash", "line_number": 262, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 263, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 264, "usage_type": "call"}, {"api_name": "flask_login.current_user.personal_info", "line_number": 269, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 269, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 277, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 283, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 288, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 288, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 291, "usage_type": "call"}, {"api_name": "roles.finance_procurement_permission.require", "line_number": 246, "usage_type": "call"}, {"api_name": "roles.finance_procurement_permission", "line_number": 246, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 247, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 302, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 302, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 306, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 306, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 307, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 307, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 308, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 308, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 309, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 309, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 310, "usage_type": "name"}, {"api_name": "flask_login.current_user.personal_info", "line_number": 316, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 316, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 324, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 325, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 325, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 326, "usage_type": "call"}, {"api_name": "roles.finance_procurement_permission.require", "line_number": 297, "usage_type": "call"}, {"api_name": "roles.finance_procurement_permission", "line_number": 297, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 298, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 336, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 339, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 339, "usage_type": "call"}, {"api_name": "roles.finance_procurement_permission.require", "line_number": 331, "usage_type": "call"}, {"api_name": "roles.finance_procurement_permission", "line_number": 331, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 332, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 348, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 348, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 354, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 355, "usage_type": "call"}, {"api_name": "flask.url_for", 
"line_number": 355, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 359, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 360, "usage_type": "call"}, {"api_name": "roles.finance_procurement_permission.require", "line_number": 343, "usage_type": "call"}, {"api_name": "roles.finance_procurement_permission", "line_number": 343, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 344, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount.query.all", "line_number": 367, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 367, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 367, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 369, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 369, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 371, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 371, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 372, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 372, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount.query.filter", "line_number": 373, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 373, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 373, "usage_type": "name"}, {"api_name": "sqlalchemy.cast", "line_number": 374, "usage_type": "call"}, {"api_name": "sqlalchemy.Date", "line_number": 374, "usage_type": "argument"}, {"api_name": "models.PurchaseTrackerAccount.booking_date", "line_number": 374, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 374, "usage_type": "name"}, {"api_name": "sqlalchemy.cast", "line_number": 375, "usage_type": "call"}, {"api_name": "sqlalchemy.Date", "line_number": 375, "usage_type": "argument"}, {"api_name": "models.PurchaseTrackerAccount.booking_date", "line_number": 375, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 375, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 377, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 378, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 385, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 385, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 385, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 386, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 386, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 386, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount.query.filter", "line_number": 388, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 388, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 388, "usage_type": "name"}, {"api_name": "sqlalchemy.cast", "line_number": 388, "usage_type": "call"}, {"api_name": "sqlalchemy.Date", "line_number": 388, "usage_type": "argument"}, {"api_name": "models.PurchaseTrackerAccount.booking_date", "line_number": 388, "usage_type": "attribute"}, {"api_name": "sqlalchemy.cast", "line_number": 389, "usage_type": "call"}, {"api_name": "sqlalchemy.Date", "line_number": 389, 
"usage_type": "argument"}, {"api_name": "models.PurchaseTrackerAccount.booking_date", "line_number": 389, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 389, "usage_type": "name"}, {"api_name": "models.PurchaseTrackerAccount.query.all", "line_number": 391, "usage_type": "call"}, {"api_name": "models.PurchaseTrackerAccount.query", "line_number": 391, "usage_type": "attribute"}, {"api_name": "models.PurchaseTrackerAccount", "line_number": 391, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 410, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 412, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 412, "usage_type": "call"}, {"api_name": "os.path", "line_number": 412, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 412, "usage_type": "call"}]} {"seq_id": "3389745396", "text": "import numpy as np\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\n\n# create the flask app\napp = Flask(__name__)\n\n# load the pickle model\nmodel = pickle.load(open(\"model.pkl\", \"rb\"))\n\n\n# defining the homepage\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\n# defining the predict page\n@app.route(\"/predict\", methods=[\"POST\"])\n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict():\n input = request.get_json()\n features = [np.array(list(input.values()))]\n prediction = model.predict(features)[0]\n print(features)\n if prediction == 0:\n prediction = \"Drizzle\"\n elif prediction == 1:\n prediction = \"Foggy\"\n elif prediction == 2:\n prediction = \"Rain\"\n elif prediction == 3:\n prediction = \"Snow\"\n elif prediction == 4:\n prediction = \"Sunny\"\n\n return jsonify({\"prediction_text\": \"{}\".format(prediction)})\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n", "repo_name": "Sahilgupta4103/WeatherPred-webApp", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 980, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 37, "usage_type": "call"}]} {"seq_id": "7632612459", "text": "#!/usr/bin/env python\n\n\"\"\"\nCalculate enrichment statistics for two sets of fasta files\nInputs:\n two fasta files to compare\n file containing patterns to check\nOutputs:\n pickled dictionary of pattern enrichments\nBen Ober-Reynolds\n\"\"\"\n\n\nimport os\nimport sys\nimport re\nimport time\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom Bio import SeqIO\nfrom joblib import Parallel, delayed\n\n\ndef main():\n\n # set up command line argument parser\n parser = argparse.ArgumentParser(description='Calculate motif densities \\\n for a target and a background set of fastas.')\n group = parser.add_argument_group('required arguments:')\n group.add_argument('-fi', '--fasta_of_interest', required=True,\n help='file containing clusters of interest')\n group.add_argument('-fb', '--background_fasta', 
required=True,\n help='file containing background clusters')\n group.add_argument('-pf', '--pattern_file', required=True,\n help='file containing patterns to check for. Format: \\\n {pattern name}\\\\t{regex_pattern}')\n group = parser.add_argument_group('optional arguments')\n group.add_argument('-od', '--output_directory', default=\".\",\n help='output directory for statistics file and figures. \\\n Default is current directory')\n group.add_argument('-op', '--output_prefix', default=\"enrichment\",\n help='output prefix for results file and figures')\n group.add_argument('-isn', '--interesting_seq_name', \n default=\"Sequences of Interest\",\n help='The name of the sequence of interest pool. Default is \\\n \"Sequences of Interest\"')\n group.add_argument('-bsn', '--background_seq_name', \n default=\"Background Sequences\", help='The name of the background \\\n sequence pool. Default is \"Background Sequences\"')\n group.add_argument('-rc', '--reverse_comp', default=\"y\",\n help='also calculate enrichment in reverse complement of each pool \\\n [y/n]? Default is y.')\n group.add_argument('-nb', '--num_bootstraps', type=int, default=1000,\n help='number of times to resample pools for enrichment calculation. \\\n Default is 1000.')\n group.add_argument('-n', '--num_cores', type=int, default=1,\n help='number of cores to use for bootstrapping.')\n\n # print help if no arguments provided\n if len(sys.argv) <= 1:\n parser.print_help()\n sys.exit()\n\n # parse command line arguments\n args = parser.parse_args()\n numCores = args.num_cores\n\n # Pre-defined variables, constants, and settings\n input_file_format = 'fasta'\n rev_c_tag = \"Rev-Comp\"\n output_prefix = time.strftime(\"%Y%m%d\") + \"_\" + args.output_prefix\n pickle_file_ext = \"p\"\n\n # Do some error checking before running this long script:\n output_dir = args.output_directory\n if not os.path.isdir(output_dir):\n print(\"Error: invalid output directory. 
Exiting...\")\n sys.exit()\n \n # Read in files:\n seqs_of_interest = read_fasta(args.fasta_of_interest, input_file_format)\n background_seqs = read_fasta(args.background_fasta, input_file_format)\n pattern_dict = read_pattern_file(args.pattern_file)\n\n # Find smallest pool size:\n pool_size = min([len(seqs_of_interest), len(background_seqs)])\n\n # seq pool dict:\n seq_pool_dict = {args.interesting_seq_name: seqs_of_interest, \n args.background_seq_name: background_seqs}\n\n # Results dictionary:\n density_result_dict = {}\n for pname in pattern_dict.keys():\n density_result_dict[pname] = {}\n\n # compare to reverse complement?\n if args.reverse_comp == 'y':\n interesting_seq_rc_name = args.interesting_seq_name + \" \" + rev_c_tag\n background_seq_rc_name = args.background_seq_name + \" \" + rev_c_tag\n rc_seqs_of_interest = reverse_comp(seqs_of_interest)\n rc_background_seqs = reverse_comp(background_seqs)\n seq_pool_dict[interesting_seq_rc_name] = rc_seqs_of_interest\n seq_pool_dict[background_seq_rc_name] = rc_background_seqs\n\n # calculate motif density for each pattern\n if numCores > 1:\n with Parallel(n_jobs=numCores, verbose=10) as parallel: \n for pname in pattern_dict.keys():\n for pool_name in seq_pool_dict.keys():\n densities = []\n print(\"Calculating density of pattern '{}' in pool '{}'\\\n \".format(pname, pool_name))\n densities = parallel(delayed(calc_resampled_motif_density)\\\n (seq_pool_dict[pool_name], pool_size, pattern_dict[pname])\n for i in range(args.num_bootstraps))\n density_result_dict[pname][pool_name] = densities\n else:\n for pname in pattern_dict.keys():\n for pool_name in seq_pool_dict.keys():\n densities = []\n print(\"Calculating density of pattern '{}' in pool '{}'\\\n \".format(pname, pool_name))\n densities = [calc_resampled_motif_density(\n seq_pool_dict[pool_name], pool_size, pattern_dict[pname])\n for i in range(args.num_bootstraps)]\n density_result_dict[pname][pool_name] = densities\n\n # Dump results to pickle for latter replotting\n with open(output_dir + '/' + output_prefix + '.' + pickle_file_ext, 'wb') as f:\n pickle.dump(density_result_dict, f)\n\n\ndef read_fasta(filename, input_file_format):\n \"\"\"\n Read in a fasta file, and return sequences as a list.\n Input: fasta filename\n Output: sequence array \n \"\"\"\n fasta_list = []\n with open(filename, 'r') as f:\n for seq_rec in SeqIO.parse(f, input_file_format):\n seq_rec = seq_rec.upper()\n fasta_list.append(str(seq_rec.seq))\n return np.array(fasta_list)\n\n\ndef read_pattern_file(filename):\n \"\"\"\n Read in a pattern file. 
Note that pattern files must be two-column,\n tab-delimited files with the first column being the pattern name, and\n the second column the regular expression defining that pattern.\n \"\"\"\n pattern_dict = {}\n with open(filename, 'r') as f:\n for line in f:\n pname, reg_exp = line.strip().split('\\t')\n reg_exp = re.compile(reg_exp)\n pattern_dict[pname] = reg_exp\n return pattern_dict\n\n\ndef reverse_comp(fasta_array):\n \"\"\"\n Reverse complement a list of sequences\n Input: list of sequences\n Output: reverse complement of same sequence list\n \"\"\"\n trans_table = str.maketrans('AGCT', 'TCGA')\n rev_list = []\n for seq in fasta_array:\n rev_list.append(seq.translate(trans_table)[::-1])\n return np.array(rev_list)\n\n\ndef calc_resampled_motif_density(seq_array, samp_size, regex):\n \"\"\"\n Calculate the length-normalized density of a specific regular\n expression pattern in a resampled sequence pool.\n Inputs: list of sequences, number of seqs to draw, regular expression pattern\n Output: length-normalized motif density\n \"\"\"\n resampled_pool = np.random.choice(seq_array, size=samp_size, replace=True)\n total_seq_space = 0\n patterns_found = 0\n for seq in resampled_pool:\n patterns_found += len(re.findall(regex, seq))\n total_seq_space += len(seq)\n return patterns_found/total_seq_space\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "boberrey/sequence_analysis", "sub_path": "pattern_enrichment.py", "file_name": "pattern_enrichment.py", "file_ext": "py", "file_size_in_byte": 7332, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 64, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 80, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 110, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 116, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 133, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 144, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 144, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 147, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 185, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 189, "usage_type": "call"}]} {"seq_id": "38213118948", "text": "import re\nfrom collections import Counter\n\nimport gensim.parsing.preprocessing as proc\nfrom gensim.utils import deaccent, to_unicode\n\n####################\n# Global variables\n####################\n\n# Load list of stopwords\nwith open('stopwords-es.txt', 'r') as file:\n stopwords = list()\n for line in file:\n stop_word = deaccent(line.strip('\\n'))\n stopwords.append(stop_word)\nstopwords = frozenset(stopwords)\n\n# regexp for matching @usernames and #hashtags\ntw_handles = r\"([@][A-z]+)|([#][A-z]+)\"\n# for matching URLs\nurls = 
r\"((\\w+:\\/\\/)[-a-zA-Z0-9:@;?&=\\/%\\+\\.\\*!'\\(\\),\\$_\\{\\}\\^~\\[\\]`#|]+)\"\npunctuation_es = r'([!¡\"\\#\\$%\\&\\'\\(\\)\\*\\+,\\-\\./:;<=>\\?\\¿@\\[\\\\\\]\\^_`\\{\\|\\}\\~])+'\n# Master Regexp\nmulti_pattern = '|'.join([tw_handles, urls, punctuation_es])\nnon_plain_re = re.compile(multi_pattern, re.UNICODE)\n####################\n\ndef remove_non_plain(document):\n \"\"\"\n Replaces urls, @usernames, #tags, emojis and numbers\n with a ' ' (space). Also removes accents and punctuation\n to finally remove redundant whitespace and lowercase all\n characters\n :param document: string\n :return: processed unicode string\n \"\"\"\n document = to_unicode(document)\n document = non_plain_re.sub(' ', document)\n document = proc.strip_non_alphanum(document)\n document = proc.strip_numeric(document)\n document = proc.strip_multiple_whitespaces(document)\n document = deaccent(document)\n return document.lower()\n\ndef process(document):\n \"\"\"\n Tokenize a document (a tweet) removing:\n - punctuation\n - URLs and Twitter handles\n - Uppercases\n - Stopword\n - Whitespace\n :param document: string\n :returns: a list of strings\n \"\"\"\n wordbag = list()\n for token in set(remove_non_plain(document).split()):\n if token not in stopwords and token != '' \\\n and token != 'rt':\n wordbag.append(token)\n return wordbag\n\ndef init_counter(corpus):\n \"\"\"\n Creates a collections.Counter object from a corpus.\n :param corpus: iterator or iterable of lists of strings\n :returns: Counter\n \"\"\"\n ctr = Counter()\n for worbag in corpus:\n ctr.update(worbag)\n\n return ctr\n", "repo_name": "CeMasChile/Twitter", "sub_path": "codigo/npl_utils.py", "file_name": "npl_utils.py", "file_ext": "py", "file_size_in_byte": 2172, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "46", "api": [{"api_name": "gensim.utils.deaccent", "line_number": 15, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 26, "usage_type": "call"}, {"api_name": "re.UNICODE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "gensim.utils.to_unicode", "line_number": 38, "usage_type": "call"}, {"api_name": "gensim.parsing.preprocessing.strip_non_alphanum", "line_number": 40, "usage_type": "call"}, {"api_name": "gensim.parsing.preprocessing", "line_number": 40, "usage_type": "name"}, {"api_name": "gensim.parsing.preprocessing.strip_numeric", "line_number": 41, "usage_type": "call"}, {"api_name": "gensim.parsing.preprocessing", "line_number": 41, "usage_type": "name"}, {"api_name": "gensim.parsing.preprocessing.strip_multiple_whitespaces", "line_number": 42, "usage_type": "call"}, {"api_name": "gensim.parsing.preprocessing", "line_number": 42, "usage_type": "name"}, {"api_name": "gensim.utils.deaccent", "line_number": 43, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 70, "usage_type": "call"}]} {"seq_id": "25014921855", "text": "from ocr import *\nimport glob\nimport matplotlib.pyplot as plt\n\nweight_path = r'.\\weights\\ocr\\yolov3-ocr_final.weights'\ncfg_path = r'.\\weights\\ocr\\yolov3-ocr.cfg'\n\ndef show_image(image):\n fig, ax1 = plt.subplots(1)\n ax1.imshow(image)\n plt.show()\n\nimage_paths = glob.glob(\"plates1/*.jpg\")\nimage_paths = glob.glob(\"m/*.jpg\")\nimage_paths = ['need contrast\\\\new\\\\534_0_plate.jpg']\nid = 0\nfor img_path in image_paths:\n platereader = PlateReader()\n img, _, _, _ = platereader.load_image(img_path)\n #segmentedImage, plate_label = 
platereader.retrieve_label_and_segmentedImage_from(img_path, weight_path, cfg_path)\n segmentedImage, plate_label, sum_confidences = platereader.final_retrieve_label_and_segmentedImage_from_im(img, weight_path, cfg_path)\n #cv2.imshow(plate_label, segmentedImage)\n #show_image(segmentedImage)\n #cv2.imwrite(img_path, segmentedImage)\n cv2.imwrite(img_path[:-4]+str(id)+\"_plate_\"+plate_label+\".jpg\", segmentedImage)\n id += 1\n", "repo_name": "FerdaousAzh/Plate-detection.", "sub_path": "plate_ocr.py", "file_name": "plate_ocr.py", "file_ext": "py", "file_size_in_byte": 979, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 13, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 14, "usage_type": "call"}]} {"seq_id": "72142295485", "text": "#данный скрипт выдаёт словарь со всеми значениями и контекстами к ним (training set)\r\n\r\ndef scrape_txt():\r\n from Zvezda import define_microcontext\r\n from collections import Counter\r\n\r\n with open(\"static/file.txt\") as f:\r\n text = f.read()\r\n word_meanings = {}\r\n\r\n stopwords_r = open('static/stopwords_r.txt')\r\n stopwords_r = stopwords_r.read()\r\n stopwords_r = stopwords_r.split(', ')\r\n\r\n for item in text.split('\\n\\n'):\r\n text, meaning = item.split('значение: ')\r\n found, context = define_microcontext(text) #возвращается найденная лемма \"звезда\" и контекст\r\n\r\n context = list(filter(lambda x: x not in stopwords_r, context))#убираем стоп-слова из контекста\r\n\r\n text = context\r\n\r\n context = Counter(context) #Частотный словарь\r\n\r\n context = {k: round(v / len(text), 3) for k, v in context.items()} #Присвоение MLE-коэффицента каждому слову контекста\r\n\r\n word_meanings[meaning] = context\r\n\r\n return word_meanings\r\n\r\n\r\n#for k,v in scrape_txt().items():\r\n# print(k+':',v)\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "SilenosChestra/Finished_Projects", "sub_path": "Zvezda_Disambiguation/FlaskApplication/Scraper_txt.py", "file_name": "Scraper_txt.py", "file_ext": "py", "file_size_in_byte": 1262, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "Zvezda.define_microcontext", "line_number": 17, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 23, "usage_type": "call"}]} {"seq_id": "17586935942", "text": "from scapy.all import IP, TCP, sniff\nimport logging # first of all import the module\n\n\n\"\"\"\nRepresents a network packet with source and destination IP addresses\n\"\"\"\nclass Packet:\n def __init__(self,rules):\n self.rules = rules\n #True, \"12.168.3.1009\", \"0\", \"10.0.0.100\", \"0\"\n def get_summary(self, pkt):\n try:\n if TCP in pkt and IP in pkt:\n for rule in self.rules:\n #print(\"src\",pkt[IP].src, pkt[TCP].sport,\"dst\", pkt[IP].dst, pkt[TCP].dport)\n #Check if the ip address and port number matches\n if (pkt[IP].src == rule[1] and pkt[IP].dst == rule[3]): \n\n if(True): #pkt[TCP].sport == rule[2] ports changes a lot\n \n #If they matches check if that rule is to be allowed or blocked\n if rule[0] == True: \n log = \"Status: \", \"BLOCK\", \"Src IP: 
\",pkt[IP].src, \"Src Port:\", pkt[TCP].sport,\"Dst IP: \", pkt[IP].dst,\"Dst Port:\" ,pkt[TCP].dport\n logging.basicConfig(filename='firewall.log', filemode='w', format=\"%(asctime)s %(message)s\")\n logging.warning(log)\n print(log)\n \n elif rule[0] == False:\n log = \"Status: \", \"ALLOW\", \"Src IP: \",pkt[IP].src, \"Src Port:\", pkt[TCP].sport,\"Dst IP: \", pkt[IP].dst,\"Dst Port:\" ,pkt[TCP].dport\n logging.basicConfig(filename='firewall.log', filemode='w', format=\"%(asctime)s %(message)s\")\n logging.warning(log)\n print(log)\n \n else:\n print(\"Src IP: \",pkt[IP].src, \"Src Port:\", pkt[TCP].sport,\"Dst IP: \", pkt[IP].dst,\"Dst Port:\" ,pkt[TCP].dport) \n else:\n pass\n # return \"Waiting for pkt....\"\n except Exception as e:\n print(f\"[ERR] Error could not get pkt: {e}\")\n False\n\n# capture = sniff(5)\n# capture.summary() \n", "repo_name": "kudzaiprichard/python-packet-filter-firewall", "sub_path": "firewall/packet.py", "file_name": "packet.py", "file_ext": "py", "file_size_in_byte": 2200, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "46", "api": [{"api_name": "scapy.all.TCP", "line_number": 14, "usage_type": "name"}, {"api_name": "scapy.all.IP", "line_number": 14, "usage_type": "name"}, {"api_name": "scapy.all.IP", "line_number": 18, "usage_type": "name"}, {"api_name": "scapy.all.IP", "line_number": 24, "usage_type": "name"}, {"api_name": "scapy.all.TCP", "line_number": 24, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 26, "usage_type": "call"}, {"api_name": "scapy.all.IP", "line_number": 30, "usage_type": "name"}, {"api_name": "scapy.all.TCP", "line_number": 30, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 32, "usage_type": "call"}, {"api_name": "scapy.all.IP", "line_number": 36, "usage_type": "name"}, {"api_name": "scapy.all.TCP", "line_number": 36, "usage_type": "name"}]} {"seq_id": "35707862664", "text": "\"\"\"initial\n\nRevision ID: 2bbb456c6a28\nRevises:\nCreate Date: 2023-11-04 14:46:55.786593\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op # pylint: disable=no-name-in-module\n\n# revision identifiers, used by Alembic.\nrevision = '2bbb456c6a28'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n 'user',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('email', sa.String(length=64), nullable=False),\n sa.Column(\n 'created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False\n ),\n sa.Column(\n 'updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False\n ),\n sa.PrimaryKeyConstraint('id', name=op.f('pk_user')),\n sa.UniqueConstraint('id', name=op.f('uq_user_id')),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('user')\n # ### end Alembic commands ###\n", "repo_name": "tanasecucliciu/fastapi-async-demo-project", "sub_path": "src/app/alembic/versions/2bbb456c6a28_initial.py", "file_name": "2bbb456c6a28_initial.py", "file_ext": "py", "file_size_in_byte": 1127, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "alembic.op.create_table", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 20, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op.f", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op.f", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 38, "usage_type": "name"}]} {"seq_id": "11744648565", "text": "# _*_ coding:utf-8 _*_\n\nfrom django.conf.urls import url\n\nfrom .views import MessageDraftView, MessageInfoView, MessageManagementView, MessageSearchView,\\\n MessageDraftFileUploadView, MessageCategoryManageView, ItemsMakeCountView, ItemReceiverCountView,\\\n ItemsMakeSearchView,ItemReceiverSearchView\n\n\n__author__ = 'haoSev7'\n__date__ = '2017/5/26 21:59'\n\n\nurlpatterns = [\n # 宣传管理信息申请表\n url(r'^messagedraft/$', MessageDraftView.as_view(), name='message_draft'),\n # 宣传信息统计页面\n url(r'^messageinfo/$', MessageInfoView.as_view(), name='message_info'),\n # 宣传信息管理页面\n url(r'^management/$', MessageManagementView.as_view(), name='management'),\n # 宣传信息查询页面\n url(r'^messagesearch/$', MessageSearchView.as_view(), name='messagesearch'),\n # 宣传信息申请表 附件上传\n url(r'^messagedraftupload/$', MessageDraftFileUploadView.as_view(), name='message_upload'),\n # 宣传信息类别管理页面\n url(r'^messagecategory/$', MessageCategoryManageView.as_view(), name='management_category'),\n # 宣传物资制作查询页面\n url(r'^itemmakesearch/$', ItemsMakeSearchView.as_view(), name='itemmakesearch'),\n # 宣传物资制作统计页面\n url(r'^itemmakecount/$', ItemsMakeCountView.as_view(), name='item_make_count'),\n # 宣传物资制作查询页面\n url(r'^itemreceiversearch/$', ItemReceiverSearchView.as_view(), name='itemreceiversearch'),\n # 宣传物资领用统计页面\n url(r'^itemreceivercount/$', ItemReceiverCountView.as_view(), name='item_receiver_count'),\n]", "repo_name": "lhr520czx/Aproject", "sub_path": "apps/xuanchuan/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1599, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", 
"pt": "41", "api": [{"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "views.MessageDraftView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "views.MessageDraftView", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "views.MessageInfoView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "views.MessageInfoView", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "views.MessageManagementView.as_view", "line_number": 20, "usage_type": "call"}, {"api_name": "views.MessageManagementView", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "views.MessageSearchView.as_view", "line_number": 22, "usage_type": "call"}, {"api_name": "views.MessageSearchView", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "views.MessageDraftFileUploadView.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "views.MessageDraftFileUploadView", "line_number": 24, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "views.MessageCategoryManageView.as_view", "line_number": 26, "usage_type": "call"}, {"api_name": "views.MessageCategoryManageView", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "views.ItemsMakeSearchView.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "views.ItemsMakeSearchView", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "views.ItemsMakeCountView.as_view", "line_number": 30, "usage_type": "call"}, {"api_name": "views.ItemsMakeCountView", "line_number": 30, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "views.ItemReceiverSearchView.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": "views.ItemReceiverSearchView", "line_number": 32, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "views.ItemReceiverCountView.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "views.ItemReceiverCountView", "line_number": 34, "usage_type": "name"}]} {"seq_id": "37895612584", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 2 02:09:45 2023\n\n@author: bat_j\n\"\"\"\n\nfrom airflow import DAG\nfrom airflow.operators.python import PythonOperator\nfrom datetime import datetime, timedelta\nfrom python_scripts.Raw_Data_Processing import Raw_Data_Processing\nfrom python_scripts.Feature_engineering import Feature_engineering\nfrom python_scripts.unit_test import moving_mean_unit_test, moving_median_unit_test\nfrom python_scripts.Applying_ML import ML_model\n\ndefault_args = {\n \"owner\": \"airflow\",\n \"retries\": 5,\n \"retry_delay\": timedelta(minutes=2),\n \"start_date\": datetime(2023,5,11)\n }\n\n\nwith DAG(\n default_args = default_args,\n dag_id = 'Data_Pipline_V1',\n description = 'DAG for the assessment',\n schedule_interval = '@daily'\n ) as dag:\n \n Data_Processing = PythonOperator(\n task_id = 'data_processing',\n python_callable = Raw_Data_Processing \n )\n \n 
Feature_Engineering = PythonOperator(\n        task_id = 'feature_engineering',\n        python_callable = Feature_engineering\n    )\n    \n    Unit_test_1 = PythonOperator(\n        task_id = 'unit_test_mean',\n        python_callable = moving_mean_unit_test\n    )\n    \n    Unit_test_2 = PythonOperator(\n        task_id = 'unit_test_median',\n        python_callable = moving_median_unit_test  \n    )\n    \n    Machine_learning_model = PythonOperator(\n        task_id = 'Machine_learning_model',\n        python_callable = ML_model  \n    )\n    \n    Data_Processing >> Feature_Engineering\n    Feature_Engineering >> Unit_test_1\n    Feature_Engineering >> Unit_test_2\n    Unit_test_1 >> Machine_learning_model\n    Unit_test_2 >> Machine_learning_model", "repo_name": "Om-patel2398/Stock_Market_Prediction", "sub_path": "dags/Trial_1.py", "file_name": "Trial_1.py", "file_ext": "py", "file_size_in_byte": 1701, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "datetime.timedelta", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "call"}, {"api_name": "airflow.DAG", "line_number": 24, "usage_type": "call"}, {"api_name": "airflow.operators.python.PythonOperator", "line_number": 31, "usage_type": "call"}, {"api_name": "python_scripts.Raw_Data_Processing.Raw_Data_Processing", "line_number": 33, "usage_type": "name"}, {"api_name": "airflow.operators.python.PythonOperator", "line_number": 36, "usage_type": "call"}, {"api_name": "python_scripts.Feature_engineering.Feature_engineering", "line_number": 38, "usage_type": "name"}, {"api_name": "airflow.operators.python.PythonOperator", "line_number": 41, "usage_type": "call"}, {"api_name": "python_scripts.unit_test.moving_mean_unit_test", "line_number": 43, "usage_type": "name"}, {"api_name": "airflow.operators.python.PythonOperator", "line_number": 46, "usage_type": "call"}, {"api_name": "python_scripts.unit_test.moving_median_unit_test", "line_number": 48, "usage_type": "name"}, {"api_name": "airflow.operators.python.PythonOperator", "line_number": 51, "usage_type": "call"}, {"api_name": "python_scripts.Applying_ML.ML_model", "line_number": 53, "usage_type": "name"}]} {"seq_id": "38448267706", "text": "from collections import deque\nfrom copy import deepcopy\n\ndef solution(name):\n    answer = 0\n    queue = deque()\n    visited = [[False] for _ in name]\n    dd = [1, -1]\n    init = 'A' * len(name)\n    \n    # queue entries: [position, button presses so far, name built so far]\n    queue.append([0, upAndDown(name[0]), name[0] + init[1:]])\n    visited[0] = True\n    result = 987654321\n    \n    while queue:\n        p, m, copied_name = queue.popleft()\n        \n        if copied_name == name:\n            result = min(result, m)\n            break\n        \n        for i in range(2):\n            _p = p + dd[i]\n            \n            if _p == -1: _p = len(name) - 1\n            elif _p == len(name):\n                _p = 0\n            \n            if not visited[_p]: continue\n            visited[_p] = True\n            if _p == len(name) - 1:\n                queue.append([_p, m + upAndDown(name[_p]) + 1, copied_name[0:_p] + name[_p]])\n            else:\n                queue.append([_p, m + upAndDown(name[_p]) + 1, copied_name[0:_p] + name[_p] + copied_name[_p + 1:]])\n    \n    answer = result\n    return answer\n\ndef upAndDown(alpha):\n    # starts with 'A'\n    # go up\n    up = ord('Z') - ord(alpha) + 1\n    \n    # go down\n    down = ord(alpha) - ord('A')\n    \n    return min(up, down)\n\nprint(solution(\"JEROEN\"))", "repo_name": "nayoon-kim/Algorithm", "sub_path": "조이스틱.py", "file_name": "조이스틱.py", "file_ext": "py", "file_size_in_byte": 1275, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "collections.deque", 
"line_number": 6, "usage_type": "call"}]} {"seq_id": "73685995002", "text": "from wordcloud import WordCloud, STOPWORDS\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\n\n\nfile_path = \"Classic Computer Science Problems in Python by David Kopec (z-lib.org).txt\"\nfile_handle = open(file_path).read()\nstopwords = set(STOPWORDS)\nstopwords.add(\"will\")\n\nbook = WordCloud(max_words=5000, stopwords=stopwords)\nbook.generate(file_handle)\nplt.imshow(book, interpolation='bilinear')\nplt.axis(\"off\")\nplt.show()\n\nimage_file = \"griffin-silhouette-ancient-mythology-fantasy-vector-18016755.jpg\"\n\n# create mask\na_mask = np.array(Image.open(image_file))\n\nwc = WordCloud(background_color=\"black\", max_words=2000, mask=a_mask,\n stopwords=stopwords)\nwc.generate(file_handle)\n\nplt.figure(figsize=(8,6), dpi=120)\nplt.imshow(wc, interpolation='bilinear')\nplt.axis(\"off\")\nplt.show()\n\n", "repo_name": "gargnityansh/WordCloud", "sub_path": "wordcloud.py", "file_name": "wordcloud.py", "file_ext": "py", "file_size_in_byte": 821, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "wordcloud.STOPWORDS", "line_number": 10, "usage_type": "argument"}, {"api_name": "wordcloud.WordCloud", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 22, "usage_type": "name"}, {"api_name": "wordcloud.WordCloud", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}]} {"seq_id": "42921966534", "text": "# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. \n# If a copy of the MPL was not distributed with this file, \n# You can obtain one at https://mozilla.org/MPL/2.0/.\n# This file is part of NF Compose\n# [2019] - [2023] © NeuroForge GmbH & Co. 
KG\n\n\nfrom typing import Any\nfrom django.http.response import HttpResponse\n\nfrom django.shortcuts import render\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom django.contrib.auth.decorators import login_required\n\nfrom skipper.settings import LOGIN_URL\n\nversion: str\nwith open('skipper/static-private/version.txt') as version_file:\n version = version_file.read().replace('\\n', '')\n\nlicenses: str\nwith open('skipper/static-private/OPENSOURCE_LICENSES.html') as licenses_file:\n licenses = licenses_file.read().replace('\\n', '')\n\n@permission_classes([IsAuthenticated])\n@login_required(login_url='/' + LOGIN_URL)\ndef licensing_view(request: Any) -> Any:\n if request.method == 'GET':\n return render(request, 'skipper/licensing.html', {\n 'name': 'Licensing',\n 'version': version\n })\n\n@permission_classes([IsAuthenticated])\n@login_required(login_url='/' + LOGIN_URL)\ndef licensing_oss_view(request: Any) -> Any:\n return HttpResponse(content=licenses, content_type='text/html')", "repo_name": "neuroforgede/nfcompose", "sub_path": "skipper/skipper/common/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1364, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 22, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.Any", "line_number": 28, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 27, "usage_type": "call"}, {"api_name": "skipper.settings.LOGIN_URL", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 37, "usage_type": "name"}, {"api_name": "django.http.response.HttpResponse", "line_number": 38, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 35, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 36, "usage_type": "call"}, {"api_name": "skipper.settings.LOGIN_URL", "line_number": 36, "usage_type": "name"}]} {"seq_id": "32718667329", "text": "import configs\nimport datasets\nimport numpy as np\nfrom torchvision import datasets as Datasets\n\n\n__all__ = ['MNIST']\n\n\nclass MNIST(datasets.BaseDataset):\n\n def __init__(self, cfg, **kwargs):\n super(MNIST, self).__init__(cfg, **kwargs)\n\n @staticmethod\n def more(cfg):\n cfg.path = configs.env.getdir(cfg.path)\n cfg.source.elements = cfg.source.width * cfg.source.height * cfg.source.time\n out = dict()\n out['elements'] = 1\n cfg.out = configs.BaseConfig(out)\n cfg.cross_folder = 0\n cfg.index_cross = 0\n return cfg\n\n def load(self):\n train = Datasets.MNIST(self.cfg.path, train=True, download=True)\n test = Datasets.MNIST(self.cfg.path, train=False, download=True)\n\n source_train, source_test = train.data.numpy()[:, np.newaxis, :, :], test.data.numpy()[:, np.newaxis, :, :]\n source_train, source_test = source_train.astype(np.float32), source_test.astype(np.float32)\n target_train, target_test = train.targets.numpy()[:, np.newaxis], test.targets.numpy()[:, np.newaxis]\n count_train, count_test = len(train), len(test)\n count = count_train 
+ count_test\n\n return {'source_train': source_train, 'target_train': target_train,\n 'source_test': source_test, 'target_test': target_test}, \\\n {'count_train': count_train, 'count_test': count_test, 'count': count}\n\n def __getitem__(self, index):\n if index < self.cfg.data_count['count_train']:\n source, target = self.data['source_train'][index], self.data['target_train'][index]\n else:\n index = index - self.cfg.data_count['count_train']\n source, target = self.data['source_test'][index], self.data['target_test'][index]\n return {'source': source, 'target': target}, index\n\n def split(self, index_cross=None):\n self._reset_norm(split_items=[['source_train', 'target_train'], ['source_test', 'target_test']])\n\n trainset = datasets.BaseSplit(self, [[0, self.cfg.data_count['count_train']]])\n testset = datasets.BaseSplit(self, [[self.cfg.data_count['count_train'], self.cfg.data_count['count']]])\n\n return trainset, testset\n\n\nif __name__ == \"__main__\":\n datasets.BaseTest(MNIST).run()\n", "repo_name": "whiplash003/pytorch_template", "sub_path": "datasets/MNIST.py", "file_name": "MNIST.py", "file_ext": "py", "file_size_in_byte": 2272, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "datasets.BaseDataset", "line_number": 10, "usage_type": "attribute"}, {"api_name": "configs.env.getdir", "line_number": 17, "usage_type": "call"}, {"api_name": "configs.env", "line_number": 17, "usage_type": "attribute"}, {"api_name": "configs.BaseConfig", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 27, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 27, "usage_type": "name"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 28, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.newaxis", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 32, "usage_type": "attribute"}, {"api_name": "datasets.BaseSplit", "line_number": 51, "usage_type": "call"}, {"api_name": "datasets.BaseSplit", "line_number": 52, "usage_type": "call"}, {"api_name": "datasets.BaseTest", "line_number": 58, "usage_type": "call"}]} {"seq_id": "27681564222", "text": "from datetime import timedelta, date\n\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.crypto import get_random_string\nfrom activity.models import Activity\nfrom member.models import Club, Membership\nfrom note.models import NoteUser\n\n\nclass TestPermissionDenied(TestCase):\n \"\"\"\n Load some protected pages and check that we have 403 errors.\n \"\"\"\n fixtures = ('initial',)\n\n def setUp(self) -> None:\n # Create sample user with no rights\n self.user = User.objects.create(\n username=\"toto\",\n )\n NoteUser.objects.create(user=self.user)\n self.client.force_login(self.user)\n\n def test_consos(self):\n response = self.client.get(reverse(\"note:consos\"))\n self.assertEqual(response.status_code, 403)\n\n def test_create_activity(self):\n response = self.client.get(reverse(\"activity:activity_create\"))\n self.assertEqual(response.status_code, 403)\n\n def test_activity_entries(self):\n activity = Activity.objects.create(\n name=\"\",\n description=\"\",\n creater=self.user,\n 
activity_type_id=4,\n organizer_id=1,\n attendees_club_id=1,\n date_start=timezone.now(),\n date_end=timezone.now(),\n )\n response = self.client.get(reverse(\"activity:activity_entry\", kwargs=dict(pk=activity.pk)))\n self.assertEqual(response.status_code, 403)\n\n def test_invite_activity(self):\n activity = Activity.objects.create(\n name=\"\",\n description=\"\",\n creater=self.user,\n activity_type_id=4,\n organizer_id=1,\n attendees_club_id=1,\n date_start=timezone.now(),\n date_end=timezone.now(),\n )\n response = self.client.get(reverse(\"activity:activity_invite\", kwargs=dict(pk=activity.pk)))\n self.assertEqual(response.status_code, 403)\n\n def test_create_club(self):\n response = self.client.get(reverse(\"member:club_create\"))\n self.assertEqual(response.status_code, 403)\n\n def test_add_member_club(self):\n club = Club.objects.create(name=get_random_string(127))\n response = self.client.get(reverse(\"member:club_add_member\", kwargs=dict(club_pk=club.pk)))\n self.assertEqual(response.status_code, 403)\n\n def test_renew_membership(self):\n club = Club.objects.create(name=get_random_string(127))\n membership = Membership.objects.create(user=self.user, club=club)\n response = self.client.get(reverse(\"member:club_renew_membership\", kwargs=dict(pk=membership.pk)))\n self.assertEqual(response.status_code, 403)\n\n\n def test_create_invoice(self):\n response = self.client.get(reverse(\"treasury:invoice_create\"))\n self.assertEqual(response.status_code, 403)\n\n def test_list_invoices(self):\n response = self.client.get(reverse(\"treasury:invoice_list\"))\n self.assertEqual(response.status_code, 403)\n\n def test_create_remittance(self):\n response = self.client.get(reverse(\"treasury:remittance_create\"))\n self.assertEqual(response.status_code, 403)\n\n def test_list_remittance(self):\n response = self.client.get(reverse(\"treasury:remittance_list\"))\n self.assertEqual(response.status_code, 403)\n\n\n\nclass TestLoginRedirect(TestCase):\n def test_consos_page(self):\n response = self.client.get(reverse(\"note:consos\"))\n self.assertRedirects(response, reverse(\"login\") + \"?next=\" + reverse(\"note:consos\"), 302, 200)\n", "repo_name": "jbdoderlein/notes-ker-lann", "sub_path": "apps/permission/tests/test_permission_denied.py", "file_name": "test_permission_denied.py", "file_ext": "py", "file_size_in_byte": 3637, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.test.TestCase", "line_number": 13, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 21, "usage_type": "name"}, {"api_name": "note.models.NoteUser.objects.create", "line_number": 24, "usage_type": "call"}, {"api_name": "note.models.NoteUser.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "note.models.NoteUser", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 32, "usage_type": "call"}, {"api_name": "activity.models", "line_number": 36, "usage_type": "name"}, {"api_name": "activity.models.Activity.objects.create", "line_number": 36, "usage_type": "call"}, {"api_name": "activity.models.Activity.objects", "line_number": 36, "usage_type": "attribute"}, 
{"api_name": "activity.models.Activity", "line_number": 36, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 43, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 43, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 44, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 44, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 46, "usage_type": "call"}, {"api_name": "activity.models.pk", "line_number": 46, "usage_type": "attribute"}, {"api_name": "activity.models", "line_number": 46, "usage_type": "name"}, {"api_name": "activity.models", "line_number": 50, "usage_type": "name"}, {"api_name": "activity.models.Activity.objects.create", "line_number": 50, "usage_type": "call"}, {"api_name": "activity.models.Activity.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "activity.models.Activity", "line_number": 50, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 57, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 57, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 58, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 58, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 60, "usage_type": "call"}, {"api_name": "activity.models.pk", "line_number": 60, "usage_type": "attribute"}, {"api_name": "activity.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 64, "usage_type": "call"}, {"api_name": "member.models.Club.objects.create", "line_number": 68, "usage_type": "call"}, {"api_name": "member.models.Club.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": "member.models.Club", "line_number": 68, "usage_type": "name"}, {"api_name": "django.utils.crypto.get_random_string", "line_number": 68, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 69, "usage_type": "call"}, {"api_name": "member.models.Club.objects.create", "line_number": 73, "usage_type": "call"}, {"api_name": "member.models.Club.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "member.models.Club", "line_number": 73, "usage_type": "name"}, {"api_name": "django.utils.crypto.get_random_string", "line_number": 73, "usage_type": "call"}, {"api_name": "member.models.Membership.objects.create", "line_number": 74, "usage_type": "call"}, {"api_name": "member.models.Membership.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "member.models.Membership", "line_number": 74, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 75, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 80, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 84, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 88, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 92, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 97, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 99, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 100, "usage_type": "call"}]} {"seq_id": "72873454203", "text": "import openpyxl\nfrom work_on_OS import date_picker\nfrom openpyxl.styles import PatternFill\nfrom tkinter import Tk\nfrom tkinter.filedialog import 
askopenfilename\n\n\nTk().withdraw()\nfile_name = askopenfilename(title=\"Select a file\", filetypes=[(\"Excel files\", \".xlsx\")])\n\n\n\ndef open_file():\n    name = file_name\n    wb = openpyxl.load_workbook(name)\n    sheet = wb.active\n    return sheet\n\n\ndef validation_of_excel_file():\n    sheet = open_file()\n    for i in range(len(sheet['1'])):\n        if \"Я подтверждаю, что являюсь совершеннолетним гражданином \" \\\n           \"РФ и потребителем никотинсодержащей продукции\" in str(sheet['1'][i].value):\n            return 0\n    return 1\n\n\ndef calculating_the_number_of_items():\n    list_of_purchased_products = []\n    list_of_presented_products = []\n    capsules = \"Капсулы\"\n    capsules_present = \"Сменные капсулы Logic Compact\"\n    count_of_capsules_buy = 0\n    count_of_capsules_present = 0\n    device = \"Logic Compact девайс\"\n    device_present = \"Электронный испаритель Logic Compact\"\n    count_of_device_buy = 0\n    count_of_device_present = 0\n    case_present = \"Чехол\"\n    count_of_case_present = 0\n    sheet = open_file()\n    acc = 1\n    while str(sheet['1'][acc].value).find(\"Подарки\") == -1:\n        acc = acc + 1\n    zero_point = acc - 14\n    for i in range(len(sheet['A'])):\n        print(f\"Checking row {i+1} of {len(sheet['A'])}...\")\n        for j in range(32+zero_point, len(sheet[i + 1])):\n            if str(sheet[i + 2][j].value).find(capsules) != -1:\n                count_of_capsules_buy += int(sheet[i + 2][j+1].value)\n            if str(sheet[i + 2][j].value).find(device) != -1:\n                count_of_device_buy += int(sheet[i + 2][j+1].value)\n        for j in range(acc, 31+zero_point):\n            if str(sheet[i + 2][j].value).find(capsules_present) != -1:\n                count_of_capsules_present += int(sheet[i + 2][j+1].value)\n            if str(sheet[i + 2][j].value).find(device_present) != -1:\n                count_of_device_present += int(sheet[i + 2][j+1].value)\n            if str(sheet[i + 2][j].value).find(case_present) != -1:\n                count_of_case_present += int(sheet[i + 2][j+1].value)\n        one_action_buy = []\n        one_action_present = []\n        one_action_buy.append(count_of_capsules_buy)\n        one_action_buy.append(count_of_device_buy)\n        one_action_present.append(count_of_capsules_present)\n        one_action_present.append(count_of_device_present)\n        one_action_present.append(count_of_case_present)\n        count_of_capsules_buy = 0\n        count_of_device_buy = 0\n        count_of_capsules_present = 0\n        count_of_device_present = 0\n        count_of_case_present = 0\n        list_of_purchased_products.append(one_action_buy)\n        list_of_presented_products.append(one_action_present)\n    return list_of_purchased_products, list_of_presented_products, zero_point\n\n\n\ndef paint_a_cell(cell, color):\n    cell.fill = PatternFill(start_color= color,\n                end_color= color,\n                fill_type='solid')\n\n\ndef main_function():\n    date = date_picker()\n    acc = 0\n    four_capsules = 0\n    core_device = 0\n    one_capsules = 0\n    two_capsules = 0\n    three_capsules = 0\n    case = 0\n    name = file_name\n    wb = openpyxl.load_workbook(name)\n    sheet = wb.active\n    items_and_presents = calculating_the_number_of_items()\n    items = items_and_presents[0]\n    presents = items_and_presents[1]\n    zero_point = items_and_presents[2]\n    print(\"Finishing...\")\n    for i in range(len(items)-1):\n        if items[i][1] == 1: # Regular device + 4 packs of capsules\n            if presents[i][0] == 4:\n                paint_a_cell(sheet[i + 2][24+zero_point], \"0000FF00\")\n                four_capsules += 1\n            else:\n                paint_a_cell(sheet[i + 2][24+zero_point], \"FFFF0000\")\n                acc += 1\n        elif items[i][0] == 2: # 2+1\n            if presents[i][0] == 1:\n                paint_a_cell(sheet[i + 2][24+zero_point], \"0000FF00\")\n                one_capsules += 1\n            else:\n                paint_a_cell(sheet[i + 2][24+zero_point], \"FFFF0000\")\n                acc += 1\n        elif items[i][0] == 3: # 3 packs of capsules + regular device
\n            if presents[i][1] == 1:\n                paint_a_cell(sheet[i + 2][24+zero_point], \"0000FF00\")\n                core_device += 1\n            else:\n                paint_a_cell(sheet[i + 2][24+zero_point], \"FFFF0000\")\n                acc += 1\n        elif items[i][0] == 6: # 6+3\n            if presents[i][0] == 3:\n                paint_a_cell(sheet[i + 2][24+zero_point], \"0000FF00\")\n                three_capsules += 1\n            else:\n                paint_a_cell(sheet[i + 2][24+zero_point], \"FFFF0000\")\n                acc += 1\n        elif items[i][0] == 4: # 4+2\n            if presents[i][0] == 2:\n                paint_a_cell(sheet[i + 2][24+zero_point], \"0000FF00\")\n                two_capsules += 1\n            elif presents[i][2] == 1:\n                paint_a_cell(sheet[i + 2][24 + zero_point], \"0000FF00\")\n                case += 1\n            else:\n                paint_a_cell(sheet[i + 2][24+zero_point], \"FFFF0000\")\n                acc += 1\n        elif items[i][0] == 0 and items[i][1] == 0:\n            if presents[i][0] == 0 and presents[i][1] == 0:\n                pass\n            else:\n                pass\n        else:\n            paint_a_cell(sheet[i + 2][24+zero_point], \"FFFF0000\")\n            acc += 1\n    for i in range(len(sheet['AG'])-1):\n        if str(sheet[i+2][32+zero_point].value).find(\"NEW Кардхол\") != -1:\n            paint_a_cell(sheet[i + 2][24+zero_point], \"FFFFFF00\")\n\n    wb.save(\"Reports/\" + date + '_report.xlsx')\n    return four_capsules, core_device, one_capsules, two_capsules, three_capsules, date, acc, case\n", "repo_name": "IlyaStarkov/LogicApp", "sub_path": "work_on_excel.py", "file_name": "work_on_excel.py", "file_ext": "py", "file_size_in_byte": 6044, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "tkinter.Tk", "line_number": 8, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 9, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 15, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 80, "usage_type": "call"}, {"api_name": "work_on_OS.date_picker", "line_number": 86, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 95, "usage_type": "call"}]} {"seq_id": "32743918266", "text": "import random\nimport laspy as lp\nimport numpy as np\n\n\ndef one_hot_encode(labels):\n    targets = np.array(list(map(label_mapper, labels)))\n    nb_classes = 5\n    one_hot_targets = np.eye(nb_classes)[targets]\n    return one_hot_targets\n\n\ndef group_into_grids(points, grid_dim):\n    pt_cloud_grids = []\n    sorted_by_x = points[np.argsort(points[:, 0])]\n    splits_along_x = np.array_split(sorted_by_x, grid_dim)\n    for split in splits_along_x:\n        sorted_by_y = split[np.argsort(split[:, 1])]\n        splits_along_y = np.array_split(sorted_by_y, grid_dim)\n        pt_cloud_grids.extend(splits_along_y)\n    return pt_cloud_grids\n\n\ndef label_mapper(label):\n    \"\"\"\n    for dales\n    Args:\n        label:\n\n    Returns:\n\n    \"\"\"\n    label_map = {1.0: 0, 2.0: 1, 6.0: 2, 9.0: 3, 26.0: 4}\n    return label_map[label]\n\n\ndef norm_pts(x):\n    \"\"\"\n    Normalise points to a\n    Unit Sphere\n    Args:\n        x: input arr\n\n    Returns: points normalised\n    to Unit Sphere\n\n    \"\"\"\n    x = x / 1000\n    x -= np.mean(x, axis=0)\n    dists = np.linalg.norm(x, axis=1)\n    return x / np.max(dists)\n\ndef normalise_intensities(intensities):\n    intensities = np.clip(intensities, a_min=1, a_max=600)\n    intensities = (intensities - 1)/600\n    return intensities\n\ndef generate_samples(points, sample_size):\n    inputs = []\n    labels = []\n    for i in range(10):\n        indices = np.random.randint(points.shape[0], size=sample_size)\n        sample = points[indices, :]\n        inp = sample[:, :-1]\n        label = one_hot_encode(sample[:, -1])\n        inputs.append(inp)\n        labels.append(label)\n    return inputs, 
labels\n\n\norig_point_cloud = lp.read('C_37EZ1.las')\norig_data = np.vstack((orig_point_cloud.x, orig_point_cloud.y, orig_point_cloud.z, orig_point_cloud.intensity,\n orig_point_cloud.classification)).transpose()\n\n# globally normalise intensities\norig_data[:, 3] = normalise_intensities(orig_data[:, 3])\n\norig_grids = group_into_grids(orig_data, 10)\n\norig_grids = random.sample(orig_grids, 30)\n\ninput_samples = []\nlabel_samples = []\ncount = 0\nfor orig_grid in orig_grids:\n\n grids = group_into_grids(orig_grid, 10)\n for grid in grids:\n grid[:, :3] = norm_pts(grid[:, :3])\n inputs, labels = generate_samples(grid, 2048)\n input_samples.extend(inputs)\n label_samples.extend(labels)\n count += 1\n print(f'{count} files processed')\n\nnp.save('inputs.npy', np.asarray(input_samples))\nnp.save('labels.npy', np.asarray(label_samples))\n", "repo_name": "patchy631/3D-supervised-seg", "sub_path": "data_generator.py", "file_name": "data_generator.py", "file_ext": "py", "file_size_in_byte": 2483, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "laspy.read", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 72, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 97, "usage_type": "call"}]} {"seq_id": "11384035136", "text": "import jwt\n\nfrom django.urls import reverse\nfrom django.contrib.sites.shortcuts import get_current_site\n\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import AllowAny\n\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView\n\nfrom watchLuxuryAPI.settings import SECRET_KEY, SIMPLE_JWT\nfrom watchLuxuryAPI.utils import response_code as rescode\n\nfrom users.serializers import UserSerializer\nfrom users.models import User\n\nfrom .serializers import LoginSerializer\nfrom .utils import EmailUtil\n\n\nclass LoginView(TokenObtainPairView):\n permission_classes = (AllowAny, )\n serializer_class = LoginSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n\n try:\n serializer.is_valid()\n 
except Exception:\n return Response({\n 'code': rescode.API_INVALID_LOGIN,\n 'msg': 'Invalid login info',\n }, status=status.HTTP_401_UNAUTHORIZED)\n\n return Response({\n 'code': rescode.API_SUCCESS,\n 'msg': 'User authenticated',\n 'data': serializer.validated_data,\n }, status=status.HTTP_200_OK)\n \n\nclass RefreshView(TokenRefreshView):\n permission_classes = (AllowAny, )\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n\n try:\n serializer.is_valid()\n except Exception:\n return Response({\n 'code': rescode.API_GENERIC_ERROR,\n 'msg': 'Request failed',\n }, status=status.HTTP_400_BAD_REQUEST)\n\n return Response({\n 'code': rescode.API_SUCCESS,\n 'msg': 'Access token refreshed',\n 'data': serializer.validated_data,\n }, status=status.HTTP_200_OK)\n\n\nclass RegisterView(APIView):\n permission_classes = (AllowAny, )\n\n def post(self, request):\n print(request.data)\n\n serializer = UserSerializer(data=request.data)\n if serializer.is_valid():\n user = serializer.save()\n\n token = RefreshToken.for_user(user).access_token\n current_site = get_current_site(request).domain\n relative_link = reverse('verify_email')\n EmailUtil.send_email(\n 'Verify your email',\n f\"\"\"Hi {user.username}. Use the link below to verify your email \\n\n http://{current_site}{relative_link}?token={str(token)}\n \"\"\",\n user.email\n )\n\n return Response({\n 'code': rescode.API_SUCCESS,\n 'msg': f'Created user',\n 'data': serializer.data\n }, status=status.HTTP_201_CREATED)\n\n print(serializer.errors)\n return Response({\n 'code': rescode.API_GENERIC_ERROR,\n 'msg': 'Request failed',\n }, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass VerifyEmailView(APIView):\n permission_classes = (AllowAny, )\n\n def get(self, request):\n token = request.query_params.get('token')\n\n if not token:\n return Response({\"msg\": \"Please provide an access token\"}, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n payload = jwt.decode(token, SECRET_KEY, [SIMPLE_JWT[\"ALGORITHM\"]])\n user = User.objects.get(id=payload['user_id'])\n if not user.is_active:\n user.is_active = True\n user.save()\n return Response({\"msg\": \"User account successfully activated!\"}, status=status.HTTP_200_OK)\n except jwt.exceptions.ExpiredSignatureError:\n return Response({\"msg\": \"Activation link expired.\"}, status=status.HTTP_400_BAD_REQUEST)\n except jwt.exceptions.InvalidSignatureError:\n return Response({\"msg\": \"Invalid activation link.\"}, status=status.HTTP_400_BAD_REQUEST)\n", "repo_name": "TaPDuy/watch-luxury-backend", "sub_path": "authentication/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4036, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "rest_framework_simplejwt.views.TokenObtainPairView", "line_number": 24, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 25, "usage_type": "name"}, {"api_name": "serializers.LoginSerializer", "line_number": 26, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 34, "usage_type": "call"}, {"api_name": "watchLuxuryAPI.utils.response_code.API_INVALID_LOGIN", "line_number": 35, "usage_type": "attribute"}, {"api_name": "watchLuxuryAPI.utils.response_code", "line_number": 35, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 37, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 37, 
"usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 39, "usage_type": "call"}, {"api_name": "watchLuxuryAPI.utils.response_code.API_SUCCESS", "line_number": 40, "usage_type": "attribute"}, {"api_name": "watchLuxuryAPI.utils.response_code", "line_number": 40, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 43, "usage_type": "name"}, {"api_name": "rest_framework_simplejwt.views.TokenRefreshView", "line_number": 46, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 47, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 55, "usage_type": "call"}, {"api_name": "watchLuxuryAPI.utils.response_code.API_GENERIC_ERROR", "line_number": 56, "usage_type": "attribute"}, {"api_name": "watchLuxuryAPI.utils.response_code", "line_number": 56, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 58, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 58, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 60, "usage_type": "call"}, {"api_name": "watchLuxuryAPI.utils.response_code.API_SUCCESS", "line_number": 61, "usage_type": "attribute"}, {"api_name": "watchLuxuryAPI.utils.response_code", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 64, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 64, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 67, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 68, "usage_type": "name"}, {"api_name": "users.serializers.UserSerializer", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.tokens.RefreshToken.for_user", "line_number": 77, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.tokens.RefreshToken", "line_number": 77, "usage_type": "name"}, {"api_name": "django.contrib.sites.shortcuts.get_current_site", "line_number": 78, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.EmailUtil.send_email", "line_number": 80, "usage_type": "call"}, {"api_name": "utils.EmailUtil", "line_number": 80, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 88, "usage_type": "call"}, {"api_name": "watchLuxuryAPI.utils.response_code.API_SUCCESS", "line_number": 89, "usage_type": "attribute"}, {"api_name": "watchLuxuryAPI.utils.response_code", "line_number": 89, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 92, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 92, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 95, "usage_type": "call"}, {"api_name": "watchLuxuryAPI.utils.response_code.API_GENERIC_ERROR", "line_number": 96, "usage_type": "attribute"}, {"api_name": "watchLuxuryAPI.utils.response_code", "line_number": 96, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 98, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 98, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 101, "usage_type": "name"}, {"api_name": 
"rest_framework.permissions.AllowAny", "line_number": 102, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 108, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 108, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 108, "usage_type": "name"}, {"api_name": "jwt.decode", "line_number": 111, "usage_type": "call"}, {"api_name": "watchLuxuryAPI.settings.SECRET_KEY", "line_number": 111, "usage_type": "argument"}, {"api_name": "watchLuxuryAPI.settings.SIMPLE_JWT", "line_number": 111, "usage_type": "name"}, {"api_name": "users.models.User.objects.get", "line_number": 112, "usage_type": "call"}, {"api_name": "users.models.User.objects", "line_number": 112, "usage_type": "attribute"}, {"api_name": "users.models.User", "line_number": 112, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 116, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 116, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 116, "usage_type": "name"}, {"api_name": "jwt.exceptions", "line_number": 117, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 118, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 118, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 118, "usage_type": "name"}, {"api_name": "jwt.exceptions", "line_number": 119, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 120, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 120, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 120, "usage_type": "name"}]} {"seq_id": "12047585505", "text": "import os\nimport setuptools\n\nshort_description = 'LaunchPanel is a simple interface designed to expose LaunchPad Actions to the user in an intuitive way.'\nif os.path.exists('README.md'):\n with open('README.md', 'r') as fh:\n long_description = fh.read()\n\nelse:\n long_description = short_description\n\nsetuptools.setup(\n name='launchpanel',\n version='2.0.1',\n author='Mike Malinowski',\n author_email='mike.malinowski@outlook.com',\n description=short_description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/mikemalinowski/launchpad',\n packages=setuptools.find_packages(),\n classifiers=[\n 'Programming Language :: Python',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n ],\n package_data={\n '': ['_resources/*.png', '_resources/*.ui', '_resources/*.qss'],\n },\n install_requires=['qute', 'scribble', 'factories', 'launchpad'],\n keywords=\"launch launchpad pad action actions launchpanel panel\",\n)\n", "repo_name": "mikemalinowski/launchpanel", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1077, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.exists", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 12, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 21, "usage_type": "call"}]} {"seq_id": "9176163934", "text": "\"\"\" Tasks for the grades app 
\"\"\"\nimport json\n\nfrom celery import shared_task\nfrom requests.exceptions import HTTPError\nfrom rest_framework.exceptions import ValidationError\nfrom user_tasks.tasks import UserTask\n\nfrom registrar.apps.core.jobs import post_job_failure, post_job_success\nfrom registrar.apps.core.tasks import get_program\n\nfrom . import lms_interop as lms\nfrom .constants import GradeReadStatus\nfrom .serializers import serialize_course_run_grades_to_csv\n\n\n@shared_task(base=UserTask, bind=True)\n# pylint: disable=unused-argument\ndef get_course_run_grades(self, job_id, user_id, file_format, program_key, internal_course_key):\n \"\"\"\n A user task that reads course run grade data from the LMS, and writes it to\n a JSON- or CSV-formatted result file.\n \"\"\"\n program = get_program(job_id, program_key)\n if not program:\n return\n try:\n any_successes, any_failures, grades = lms.get_course_run_grades(\n program.discovery_uuid,\n internal_course_key,\n )\n except HTTPError as err:\n post_job_failure(\n job_id,\n f\"HTTP error {err.response.status_code} when getting grades at {err.request.url}\"\n )\n return\n except ValidationError as err:\n post_job_failure(\n job_id,\n f\"Invalid grade data from LMS: {err}\",\n )\n return\n\n if any_successes and any_failures:\n code_str = str(GradeReadStatus.MULTI_STATUS.value)\n elif not any_successes and not any_failures:\n code_str = str(GradeReadStatus.NO_CONTENT.value)\n elif any_successes:\n code_str = str(GradeReadStatus.OK.value)\n else:\n code_str = str(GradeReadStatus.UNPROCESSABLE_ENTITY.value)\n\n if file_format == 'json':\n serialized = json.dumps(grades, indent=4)\n elif file_format == 'csv':\n serialized = serialize_course_run_grades_to_csv(grades)\n else:\n raise ValueError(f'Invalid file_format: {file_format}')\n post_job_success(job_id, serialized, file_format, text=code_str)\n", "repo_name": "openedx/registrar", "sub_path": "registrar/apps/grades/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 2045, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "41", "api": [{"api_name": "registrar.apps.core.tasks.get_program", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.exceptions.HTTPError", "line_number": 32, "usage_type": "name"}, {"api_name": "registrar.apps.core.jobs.post_job_failure", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 38, "usage_type": "name"}, {"api_name": "registrar.apps.core.jobs.post_job_failure", "line_number": 39, "usage_type": "call"}, {"api_name": "constants.GradeReadStatus.MULTI_STATUS", "line_number": 46, "usage_type": "attribute"}, {"api_name": "constants.GradeReadStatus", "line_number": 46, "usage_type": "name"}, {"api_name": "constants.GradeReadStatus.NO_CONTENT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "constants.GradeReadStatus", "line_number": 48, "usage_type": "name"}, {"api_name": "constants.GradeReadStatus.OK", "line_number": 50, "usage_type": "attribute"}, {"api_name": "constants.GradeReadStatus", "line_number": 50, "usage_type": "name"}, {"api_name": "constants.GradeReadStatus.UNPROCESSABLE_ENTITY", "line_number": 52, "usage_type": "attribute"}, {"api_name": "constants.GradeReadStatus", "line_number": 52, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "serializers.serialize_course_run_grades_to_csv", "line_number": 57, "usage_type": "call"}, {"api_name": 
"registrar.apps.core.jobs.post_job_success", "line_number": 60, "usage_type": "call"}, {"api_name": "celery.shared_task", "line_number": 17, "usage_type": "call"}, {"api_name": "user_tasks.tasks.UserTask", "line_number": 17, "usage_type": "name"}]} {"seq_id": "41003057636", "text": "# import standard plotting and animation\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom IPython.display import clear_output\n\n# import autograd functionality\nfrom autograd import grad as compute_grad # The only autograd function you may ever need\nimport autograd.numpy as np\nfrom autograd import hessian as compute_hess\nimport math\nimport time\nfrom matplotlib import gridspec\nimport copy\n\nclass Visualizer:\n '''\n Visualize an input cost function based on data.\n '''\n \n #### initialize ####\n def __init__(self,data):\n # grab input\n data = data.T\n self.x = data[:,:-1]\n self.y = data[:,-1]\n \n # sigmoid\n def sigmoid(self,t):\n return 1/(1 + np.exp(-t))\n \n # sigmoid non-convex least squares\n def sigmoid_least_squares(self,w):\n cost = 0\n for p in range(0,len(self.y)):\n x_p = self.x[p,:]\n y_p = self.y[p]\n a_p = w[0] + np.sum([u*v for (u,v) in zip(x_p,w[1:])])\n cost +=(self.sigmoid(a_p) - y_p)**2\n return cost\n\n ###### function plotting functions #######\n def plot_costs(self,**kwargs): \n # construct figure\n fig, axs = plt.subplots(1, 2, figsize=(6,3))\n\n # create subplot with 2 panels\n gs = gridspec.GridSpec(1, 2, width_ratios=[.75,1]) \n ax1 = plt.subplot(gs[0]);\n self.scatter_pts(ax1)\n ax2 = plt.subplot(gs[1],projection='3d'); \n\n \n # pull user-defined args\n viewmax = 3\n if 'viewmax' in kwargs:\n viewmax = kwargs['viewmax']\n view = [20,100]\n if 'view' in kwargs:\n view = kwargs['view']\n label_axes = True\n if 'label_axes' in kwargs:\n label_axes = kwargs['label_axes']\n \n # make contour plot in each panel\n g = self.sigmoid_least_squares\n self.surface_plot(g,ax2,viewmax,view)\n \n if label_axes == True:\n ax2.set_xlabel(r'$w_0$',fontsize = 12)\n ax2.set_ylabel(r'$w_1$',fontsize = 12,rotation = 0)\n plt.show()\n \n ### visualize the surface plot of cost function ###\n def surface_plot(self,g,ax,wmax,view):\n ##### Produce cost function surface #####\n r = np.linspace(-wmax,wmax,300)\n\n # create grid from plotting range\n w1_vals,w2_vals = np.meshgrid(r,r)\n w1_vals.shape = (len(r)**2,1)\n w2_vals.shape = (len(r)**2,1)\n w_ = np.concatenate((w1_vals,w2_vals),axis = 1)\n g_vals = []\n for i in range(len(r)**2):\n g_vals.append(g(w_[i,:]))\n g_vals = np.asarray(g_vals)\n \n w1_vals.shape = (np.size(r),np.size(r))\n w2_vals.shape = (np.size(r),np.size(r))\n \n ### is this a counting cost? 
if so re-calculate ###\n levels = np.unique(g_vals)\n if np.size(levels) < 30:\n # plot each level of the counting cost\n levels = np.unique(g_vals)\n for u in levels:\n # make copy of cost and nan out all non level entries\n z = g_vals.copy()\n ind = np.argwhere(z != u)\n ind = [v[0] for v in ind]\n z[ind] = np.nan\n\n # plot the current level\n z.shape = (len(r),len(r)) \n ax.plot_surface(w1_vals,w2_vals,z,alpha = 1,color = '#696969',zorder = 0,shade = True,linewidth=0)\n\n else: # smooth cost function, plot usual\n # reshape and plot the surface, as well as where the zero-plane is\n g_vals.shape = (np.size(r),np.size(r))\n\n # plot cost surface\n ax.plot_surface(w1_vals,w2_vals,g_vals,alpha = 1,color = 'w',rstride=25, cstride=25,linewidth=1,edgecolor = 'k',zorder = 2) \n \n ### clean up panel ###\n ax.xaxis.pane.fill = False\n ax.yaxis.pane.fill = False\n ax.zaxis.pane.fill = False\n\n ax.xaxis.pane.set_edgecolor('white')\n ax.yaxis.pane.set_edgecolor('white')\n ax.zaxis.pane.set_edgecolor('white')\n\n ax.xaxis._axinfo[\"grid\"]['color'] = (1,1,1,0)\n ax.yaxis._axinfo[\"grid\"]['color'] = (1,1,1,0)\n ax.zaxis._axinfo[\"grid\"]['color'] = (1,1,1,0)\n\n ax.view_init(view[0],view[1])\n \n \n # scatter points\n def scatter_pts(self,ax):\n if np.shape(self.x)[1] == 1:\n # set plotting limits\n xmax = max(copy.deepcopy(self.x))\n xmin = min(copy.deepcopy(self.x))\n xgap = (xmax - xmin)*0.4\n xmin -= xgap\n xmax += xgap\n \n ymax = max(copy.deepcopy(self.y))\n ymin = min(copy.deepcopy(self.y))\n ygap = (ymax - ymin)*0.4\n ymin -= ygap\n ymax += ygap \n\n # initialize points\n ax.scatter(self.x,self.y,color = 'k', edgecolor = 'w',linewidth = 0.9,s = 40)\n\n # clean up panel\n ax.set_xlim([xmin,xmax])\n ax.set_ylim([ymin,ymax])\n \n # label axes\n ax.set_xlabel(r'$x$', fontsize = 12)\n ax.set_ylabel(r'$y$', rotation = 0,fontsize = 12)\n \n ax.axhline(y=0, color='k',zorder = 0,linewidth = 0.5)\n ax.axvline(x=0, color='k',zorder = 0,linewidth = 0.5)\n \n if np.shape(self.x)[1] == 2:\n # set plotting limits\n xmax1 = copy.deepcopy(max(self.x[:,0]))\n xmin1 = copy.deepcopy(min(self.x[:,0]))\n xgap1 = (xmax1 - xmin1)*0.35\n xmin1 -= xgap1\n xmax1 += xgap1\n \n xmax2 = copy.deepcopy(max(self.x[:,0]))\n xmin2 = copy.deepcopy(min(self.x[:,0]))\n xgap2 = (xmax2 - xmin2)*0.35\n xmin2 -= xgap2\n xmax2 += xgap2\n \n ymax = max(self.y)\n ymin = min(self.y)\n ygap = (ymax - ymin)*0.2\n ymin -= ygap\n ymax += ygap \n\n # initialize points\n ax.scatter(self.x[:,0],self.x[:,1],self.y,s = 40,color = 'k', edgecolor = 'w',linewidth = 0.9)\n\n # clean up panel\n ax.set_xlim([xmin1,xmax1])\n ax.set_ylim([xmin2,xmax2])\n ax.set_zlim([ymin,ymax])\n \n ax.set_xticks(np.arange(round(xmin1) +1, round(xmax1), 1.0))\n ax.set_yticks(np.arange(round(xmin2) +1, round(xmax2), 1.0))\n\n # label axes\n ax.set_xlabel(r'$x_1$', fontsize = 12,labelpad = 5)\n ax.set_ylabel(r'$x_2$', rotation = 0,fontsize = 12,labelpad = 5)\n ax.set_zlabel(r'$y$', rotation = 0,fontsize = 12,labelpad = -3)\n\n # clean up panel\n ax.xaxis.pane.fill = False\n ax.yaxis.pane.fill = False\n ax.zaxis.pane.fill = False\n\n ax.xaxis.pane.set_edgecolor('white')\n ax.yaxis.pane.set_edgecolor('white')\n ax.zaxis.pane.set_edgecolor('white')\n\n ax.xaxis._axinfo[\"grid\"]['color'] = (1,1,1,0)\n ax.yaxis._axinfo[\"grid\"]['color'] = (1,1,1,0)\n ax.zaxis._axinfo[\"grid\"]['color'] = (1,1,1,0) \n ", "repo_name": "jermwatt/machine_learning_refined", "sub_path": "notes/13_Multilayer_perceptrons/chapter_13_library/LS_sigmoid.py", "file_name": "LS_sigmoid.py", "file_ext": 
"py", "file_size_in_byte": 7169, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1452, "dataset": "github-code", "pt": "41", "api": [{"api_name": "autograd.numpy.exp", "line_number": 30, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 30, "usage_type": "name"}, {"api_name": "autograd.numpy.sum", "line_number": 38, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "autograd.numpy.linspace", "line_number": 77, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 77, "usage_type": "name"}, {"api_name": "autograd.numpy.meshgrid", "line_number": 80, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 80, "usage_type": "name"}, {"api_name": "autograd.numpy.concatenate", "line_number": 83, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 83, "usage_type": "name"}, {"api_name": "autograd.numpy.asarray", "line_number": 87, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 87, "usage_type": "name"}, {"api_name": "autograd.numpy.size", "line_number": 89, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 89, "usage_type": "name"}, {"api_name": "autograd.numpy.size", "line_number": 90, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 90, "usage_type": "name"}, {"api_name": "autograd.numpy.unique", "line_number": 93, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 93, "usage_type": "name"}, {"api_name": "autograd.numpy.size", "line_number": 94, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 94, "usage_type": "name"}, {"api_name": "autograd.numpy.unique", "line_number": 96, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 96, "usage_type": "name"}, {"api_name": "autograd.numpy.argwhere", "line_number": 100, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 100, "usage_type": "name"}, {"api_name": "autograd.numpy.nan", "line_number": 102, "usage_type": "attribute"}, {"api_name": "autograd.numpy", "line_number": 102, "usage_type": "name"}, {"api_name": "autograd.numpy.size", "line_number": 110, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 110, "usage_type": "name"}, {"api_name": "autograd.numpy.shape", "line_number": 133, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 133, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 135, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 136, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 141, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 142, "usage_type": "call"}, 
{"api_name": "autograd.numpy.shape", "line_number": 161, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 161, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 163, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 164, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 169, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 170, "usage_type": "call"}, {"api_name": "autograd.numpy.arange", "line_number": 189, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 189, "usage_type": "name"}, {"api_name": "autograd.numpy.arange", "line_number": 190, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 190, "usage_type": "name"}]} {"seq_id": "40516794492", "text": "import numpy as np\nimport os\nimport nibabel\nimport torch\nimport pandas as pd\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\nfrom collections import OrderedDict\nfrom nilearn.masking import unmask\n\ndef bin_age(age_real: torch.Tensor):\n bins = [i for i in range(4, 92, 2)]\n age_binned = age_real.clone()\n for value in bins[::-1]:\n age_binned[age_real <= value] = value\n return age_binned.long()\n\ndef read_data(path, dataset, fast):\n print(f\"Read {dataset.upper()}\")\n df = pd.read_csv(os.path.join(path, dataset + \".tsv\"), sep=\"\\t\")\n df.loc[df[\"split\"] == \"external_test\", \"site\"] = np.nan\n\n y_arr = df[[\"age\", \"site\"]].values\n\n x_arr = np.zeros((10, 3659572))\n if not fast:\n x_arr = np.load(os.path.join(path, dataset + \".npy\"), mmap_mode=\"r\")\n \n print(\"- y size [original]:\", y_arr.shape)\n print(\"- x size [original]:\", x_arr.shape)\n return x_arr, y_arr\n\nclass OpenBHB(torch.utils.data.Dataset):\n def __init__(self, root, train=True, internal=True, transform=None, \n label=\"cont\", fast=False, load_feats=None):\n self.root = root\n\n if train and not internal:\n raise ValueError(\"Invalid configuration train=True and internal=False\")\n \n self.train = train\n self.internal = internal\n \n dataset = \"train\"\n if not train:\n if internal:\n dataset = \"internal_test\"\n else:\n dataset = \"external_test\"\n \n self.X, self.y = read_data(root, dataset, fast)\n self.T = transform\n self.label = label\n self.fast = fast\n\n self.bias_feats = None\n if load_feats:\n print(\"Loading biased features\", load_feats)\n self.bias_feats = torch.load(load_feats, map_location=\"cpu\")\n \n print(f\"Read {len(self.X)} records\")\n\n def __len__(self):\n return len(self.y)\n\n def __getitem__(self, index):\n if not self.fast:\n x = self.X[index]\n else:\n x = self.X[0]\n\n y = self.y[index]\n\n if self.T is not None:\n x = self.T(x)\n \n # sample, age, site\n age, site = y[0], y[1]\n if self.label == \"bin\":\n age = bin_age(torch.tensor(age))\n \n if self.bias_feats is not None:\n return x, age, self.bias_feats[index]\n else:\n return x, age, site\n\nclass FeatureExtractor(BaseEstimator, TransformerMixin):\n \"\"\" Select only the requested data associatedd features from the the\n input buffered data.\n \"\"\"\n MODALITIES = OrderedDict([\n (\"vbm\", {\n \"shape\": (1, 121, 145, 121),\n \"size\": 519945}),\n (\"quasiraw\", {\n \"shape\": (1, 182, 218, 182),\n \"size\": 1827095}),\n (\"xhemi\", {\n \"shape\": (8, 163842),\n \"size\": 1310736}),\n (\"vbm_roi\", {\n \"shape\": (1, 284),\n \"size\": 284}),\n (\"desikan_roi\", {\n \"shape\": (7, 68),\n \"size\": 476}),\n (\"destrieux_roi\", {\n \"shape\": (7, 148),\n \"size\": 1036})\n ])\n MASKS = {\n 
\"vbm\": {\n \"path\": None,\n \"thr\": 0.05},\n \"quasiraw\": {\n \"path\": None,\n \"thr\": 0}\n }\n\n def __init__(self, dtype, mock=False):\n \"\"\" Init class.\n Parameters\n ----------\n dtype: str\n the requested data: 'vbm', 'quasiraw', 'vbm_roi', 'desikan_roi',\n 'destrieux_roi' or 'xhemi'.\n \"\"\"\n if dtype not in self.MODALITIES:\n raise ValueError(\"Invalid input data type.\")\n self.dtype = dtype\n\n data_types = list(self.MODALITIES.keys())\n index = data_types.index(dtype)\n \n cumsum = np.cumsum([item[\"size\"] for item in self.MODALITIES.values()])\n \n if index > 0:\n self.start = cumsum[index - 1]\n else:\n self.start = 0\n self.stop = cumsum[index]\n \n self.masks = dict((key, val[\"path\"]) for key, val in self.MASKS.items())\n self.masks[\"vbm\"] = \"./data/masks/cat12vbm_space-MNI152_desc-gm_TPM.nii.gz\"\n self.masks[\"quasiraw\"] = \"./data/masks/quasiraw_space-MNI152_desc-brain_T1w.nii.gz\"\n\n self.mock = mock\n if mock:\n return\n\n for key in self.masks:\n if self.masks[key] is None or not os.path.isfile(self.masks[key]):\n raise ValueError(\"Impossible to find mask:\", key, self.masks[key])\n arr = nibabel.load(self.masks[key]).get_fdata()\n thr = self.MASKS[key][\"thr\"]\n arr[arr <= thr] = 0\n arr[arr > thr] = 1\n self.masks[key] = nibabel.Nifti1Image(arr.astype(int), np.eye(4))\n\n def fit(self, X, y):\n return self\n\n def transform(self, X):\n if self.mock:\n #print(\"transforming\", X.shape)\n data = X.reshape(self.MODALITIES[self.dtype][\"shape\"])\n #print(\"mock data:\", data.shape)\n return data\n \n # print(X.shape)\n select_X = X[self.start:self.stop]\n if self.dtype in (\"vbm\", \"quasiraw\"):\n im = unmask(select_X, self.masks[self.dtype])\n select_X = im.get_fdata()\n select_X = select_X.transpose(2, 0, 1)\n select_X = select_X.reshape(self.MODALITIES[self.dtype][\"shape\"])\n # print('transformed.shape', select_X.shape)\n return select_X\n\n\nif __name__ == '__main__':\n import sys\n from torchvision import transforms\n from .transforms import Crop, Pad\n\n selector = FeatureExtractor(\"vbm\")\n\n T_pre = transforms.Lambda(lambda x: selector.transform(x))\n T_train = transforms.Compose([\n T_pre,\n Crop((1, 121, 128, 121), type=\"random\"),\n Pad((1, 128, 128, 128)),\n transforms.Lambda(lambda x: torch.from_numpy(x)),\n transforms.Normalize(mean=0.0, std=1.0)\n ])\n\n train_loader = torch.utils.data.DataLoader(OpenBHB(sys.argv[1], train=True, internal=True, transform=T_train),\n batch_size=3, shuffle=True, num_workers=8,\n persistent_workers=True)\n \n x, y1, y2 = next(iter(train_loader))\n print(x.shape, y1, y2)", "repo_name": "EIDOSLAB/contrastive-brain-age-prediction", "sub_path": "src/data/openbhb.py", "file_name": "openbhb.py", "file_ext": "py", "file_size_in_byte": 6325, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.Tensor", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": 
"torch.utils", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 87, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 87, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "nibabel.load", "line_number": 154, "usage_type": "call"}, {"api_name": "nibabel.Nifti1Image", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 158, "usage_type": "call"}, {"api_name": "nilearn.masking.unmask", "line_number": 173, "usage_type": "call"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 188, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 188, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 189, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 189, "usage_type": "name"}, {"api_name": "transforms.Crop", "line_number": 191, "usage_type": "call"}, {"api_name": "transforms.Pad", "line_number": 192, "usage_type": "call"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 193, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 193, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 193, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 194, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 194, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 197, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 197, "usage_type": "attribute"}]} {"seq_id": "73305640444", "text": "from django.shortcuts import render\nimport pickle\nimport numpy as np\nimport pandas as pd\n\n## for Ipl\nmodel_path_ipl='model/pipetipl.pkl'\npipe_ipl=pickle.load(open(model_path_ipl,'rb'))\ndef ipl_pridict(request):\n if request.method=='POST':\n batting_team=request.POST['batting_team']\n bowling_team=request.POST['bowling_team']\n city=request.POST['city']\n current_score=request.POST['current_score']\n overs=request.POST['overs']\n wickets=request.POST['wickets']\n last_five=request.POST['last_five']\n fields=[batting_team,bowling_team,city,current_score,overs,wickets,last_five]\n if batting_team==bowling_team:\n return render(request,'index.html',{'Error':'Batting Team And Bowling Team Can not Be same'})\n if not None in fields:\n overs=float(overs)\n if overs>=5 and overs<=19:\n wickets=float(wickets)\n current_score=float(current_score)\n last_five=float(last_five)\n balls_left=120-(overs*6)\n wickets_left=10-wickets\n crr=current_score/overs\n input=pd.DataFrame([[batting_team,bowling_team,city,current_score,balls_left,wickets_left,crr,last_five]],columns=['batting_team','bowling_team','city','current_score','balls_left','wickets_left','crr','last_five'])\n result=pipe_ipl.predict(input)[0]\n result=np.round(result,0)\n result_dict={'Batting_team':batting_team,\n 'Bowling_team':bowling_team,\n 'Current_Runs':current_score,\n 'overs':overs,\n 
'wickets':wickets,\n 'result':result}\n return render(request,'index.html',{'dict':result_dict})\n return render(request,'index.html',{'Error':'Please Type Correct Input'}) \n return render(request,'index.html')\n\n\n\n\n", "repo_name": "Rammy12/IplScorePridictor", "sub_path": "app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1964, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pickle.load", "line_number": 8, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}]} {"seq_id": "26247681247", "text": "\nimport serial\nimport time\nimport datetime\n\nlist_quality = [\n \"160x120\",\n \"176x144\",\n \"320x240\",\n \"352x288\",\n \"640x480\",\n \"800x600\",\n \"1024x768\",\n \"1280x1024\"\n]\n\nitem_list = []\nser1 = serial.Serial('/dev/ttyUSB0', 921600, timeout=1.5)\n\n\nclass Cameras:\n def __init__(self, num, serial_in, list_items, list_qual):\n self.num = num\n self.serial_in = serial_in\n self.list_items = list_items\n self.list_qual = list_qual\n\n def take_photo(self):\n data_in = []\n self.serial_in.write(b'1')\n while self.serial_in.inWaiting() == 0:\n pass\n while True:\n try:\n data_in.append(int(self.serial_in.read(1)[-1]).to_bytes(1, 'little'))\n except:\n print(\"Bytes received = \" + str(data_in.__len__()))\n break\n stamp = str(datetime.datetime.time(datetime.datetime.today()))\n stamp = str(stamp[0:2] + \"_\" + stamp[3:5] + \"_\" + stamp[6:8])\n photo = open('photos/testing_' + stamp + '.jpg', 'wb')\n for i in range(data_in.__len__()):\n photo.write(data_in[i])\n photo.close()\n data_in.clear()\n\n def change_q(self, qual_num):\n qual = qual_num + 2\n self.serial_in.write(str(qual).encode())\n\n\nif __name__ == \"__main__\":\n cam1 = Cameras(0, ser1, item_list, list_quality)\n cam1.change_q(7)\n time.sleep(3)\n while True:\n cam1.take_photo()\n for i in range(30):\n time.sleep(60)\n print(i+1)\n", "repo_name": "NikolayKadiev/Modules-and-stuff", "sub_path": "OV2640_mini_2mp_plus/ardu_cam_timer.py", "file_name": "ardu_cam_timer.py", "file_ext": "py", "file_size_in_byte": 1531, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "serial.Serial", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.time", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 59, "usage_type": "call"}]} {"seq_id": "18560344509", "text": "from typing import *\nimport pastel\n\nclass Logger:\n def __init__(self, level:int=1):\n self.level = level\n self.debug = self._make_log_function(\n level=4,\n style_normal='options=dark',\n style_value_primary='options=dark;fg=cyan',\n style_value_secondary='options=dark;fg=blue'\n )\n self.info = self._make_log_function(\n level=3,\n style_normal='fg=default',\n 
style_value_primary='fg=cyan',\n style_value_secondary='fg=blue'\n )\n self.warn = self._make_log_function(\n level=2,\n style_normal='fg=yellow',\n style_value_primary='fg=default',\n style_value_secondary='options=bold'\n )\n self.error = self._make_log_function(\n level=1,\n style_normal='fg=red',\n style_value_primary='fg=default',\n style_value_secondary='options=bold'\n )\n self.success = self._make_log_function(\n level=3,\n style_normal='fg=green',\n style_value_primary='fg=default',\n style_value_secondary='options=bold'\n )\n\n def _make_log_function(self, level, style_normal, style_value_primary, style_value_secondary):\n def _function(msg: str, data1=None, data2=None):\n msg = msg.format(\n f'<{style_value_primary}>{data1}' if data1 is not None else '',\n f'<{style_value_secondary}>{data2}' if data2 is not None else ''\n )\n if self.level >= level:\n print(pastel.colorize(f'<{style_normal}>{msg}'))\n return _function\n", "repo_name": "ewen-lbh/check-availability", "sub_path": "check_availability/log.py", "file_name": "log.py", "file_ext": "py", "file_size_in_byte": 1511, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pastel.colorize", "line_number": 45, "usage_type": "call"}]}
{"seq_id": "36066385686", "text": "\"\"\"\n API TEST \n\n Runs parameter tests and response-time tests against the specified interface.\n\n\n Bingo\n\n\n\n\"\"\"\nimport requests\nimport json, random, copy\nfrom optparse import OptionParser\n\ndef initData(p,typ):\n\n numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n character = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',\n 'u', 'v', 'w', 'x', 'y', 'z']\n special = ['~', '`', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '-', '+', '=', '{', '}', '|', '\\\\', '[',\n ']', '\\'', ':', '\"', ';', '<', '>', '?', '?', ',', '.', '/', ' ']\n if typ == 'string':\n return random.choice(character)\n if typ == 'integer':\n return int(random.choice(numbers))\n\ndef getLog(way,*args):\n if way == True: # if the console flag is set, print straight to the console\n for message in args:\n print(message)\n else:\n for message in args:\n with open(\"Result.txt\", \"a+\") as f:\n f.write(message)\n f.write('\\r\\n')\n\ndef transHttp(ConfigHttp,method):\n global maxtime,resdict\n if method == \"get\":\n resp = ConfigHttp.get()\n else:\n resp = ConfigHttp.post()\n tim = resp.elapsed.total_seconds() * 1000 # response time in ms\n if int(tim) > maxtime:\n maxtime = int(tim)\n resdict = ConfigHttp.data\n return json.loads(str(resp.content, 'utf-8'))\n\nclass ConfigHttp:\n def __init__(self):\n global timeout\n timeout = 2\n self.headers = {}\n self.params = {}\n self.data = {}\n self.host = None\n self.url = None\n self.files = {}\n self.response={}\n self.path = None\n\n def set_url(self, url):\n self.url = self.host + url\n\n def set_headers(self, header):\n self.headers = header\n\n def set_params(self, param):\n self.params = param\n\n def set_host(self, host):\n self.host = host\n\n def set_data(self, data):\n self.data = data\n\n def set_files(self, file):\n self.files = file\n\n # defined http get method\n def get(self):\n try:\n # response = requests.get(self.url, params=self.params, headers=self.headers, timeout=float(timeout))\n response = requests.get(self.url, params=self.data, headers=self.headers, timeout=float(timeout))\n\n # response.raise_for_status()\n\n #return json.loads(str(response.content, 'utf-8'))\n return response # return the raw response so its elapsed time can be used for the performance check\n except requests.exceptions.Timeout: # requests raises its own Timeout, not the builtin TimeoutError\n # self.logger.error(\"Time out!\")\n return None\n\n\n # defined http post method\n def post(self):\n try:\n response = requests.post(self.url, headers=self.headers, data=self.data, files=self.files,\n timeout=float(timeout))\n # response.raise_for_status()\n #return json.loads(str(response.content, 'utf-8'))\n return response\n except requests.exceptions.Timeout: # requests raises its own Timeout, not the builtin TimeoutError\n # self.logger.error(\"Time out!\")\n return None\n\nspecial = ['~', '`', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '-', '+', '=', '{', '}', '|', '\\\\', '[',\n ']', '\\'', ':', '\"', ';', '<', '>', '?', '?', ',', '.', '/', ' ']\nstatus = {\"success\": 2000, \"fail\":5000,\"missarg\":4010}\nargType = ['string', 'integer']\nlimiTime = 500 # response-time limit in ms; slower calls are flagged\ngeShi = {\"tab\": \" \"}\n\n\nif __name__ == '__main__':\n\n optparser = OptionParser()\n #  add the private options here:\n optparser.add_option(\"-c\", action=\"store_true\", dest=\"way\", help=\"print the log straight to the console\")\n optparser.add_option(\"-t\", action=\"store_false\", dest=\"way\", help=\"write the log to Result.txt\")\n optparser.add_option(\"-p\",\"--path\", type=str, dest=\"path\", help=\"path of the interface to test, e.g. /search/goods\")\n\n (options, args) = optparser.parse_args()\n way = options.way\n path = options.path\n\n # way and path are both required; exit otherwise\n if way == None or path == None:\n optparser.print_help()\n exit()\n\n\n API = ConfigHttp()\n url = \"http://172.16.31.42:7211/v2/api-docs\"\n res_html = requests.get(url)\n res_dict = json.loads(res_html.text)\n host = res_dict[\"host\"]\n API.set_host(\"http://\"+host)\n header = {\"x-uc-userdata\": \"{id:1}\"}\n API.set_headers(header)\n paths = res_dict[\"paths\"]\n path_will_test = []\n # path_will_test.append(\"/search/goods/associativeWords\")\n # path_will_test.append(\"/search/goods\")\n path_will_test.append(path)\n\n for i, j in paths.items():\n if i in path_will_test:\n global maxtime,resdict\n maxtime = 0 # tracks response time, keeping only the slowest call\n resdict = {} # request data of the slowest call\n API.path = i\n API.set_url(i) # initialise the url\n api_methods = list(j.keys())[0]\n dict_values = list(j.values())[0]\n getLog(way,dict_values[\"summary\"], \"path: \"+API.path, \"method: \"+api_methods)\n\n API.data = {}\n\n # build a valid data payload (header parameters are skipped for now) and run the happy-path request\n for parameter in dict_values[\"parameters\"]:\n if parameter['in'] != 'header':\n API.data[parameter['name']] = initData(parameter, parameter['type'])\n dataRight = copy.deepcopy(API.data) # keep a pristine copy of the valid parameters\n res = transHttp(API, api_methods)\n if res['statusCode'] == status[\"success\"]:\n getLog(way, \"pass -- smoke test\")\n\n else:\n getLog(way, \"fail -- smoke test\", geShi[\"tab\"]+\"error response: \"+json.dumps(res))\n getLog(way, geShi[\"tab\"] + \"request data: \" + str(API.data))\n\n getLog(way,\"<<<<<< parameter type mismatch test >>>>>>\")\n for parameter in dict_values[\"parameters\"]:\n if parameter['in'] != 'header':\n for typ in argType:\n if typ != parameter['type']:\n API.data[parameter['name']] = initData(parameter, typ)\n res = transHttp(API, api_methods)\n if res['statusCode'] == status[\"fail\"]:\n getLog(way,\"pass -- \"+ parameter[\"name\"]+\" expects type \"+parameter['type']+\", tested with type \"+typ)\n else:\n getLog(way,\"fail -- \"+parameter[\"name\"]+\" expects type \"+parameter['type']+\", tested with type \"+typ, geShi[\"tab\"]+\"error response: \" + json.dumps(res))\n getLog(way,geShi[\"tab\"]+\"request data: \" + str(API.data))\n API.data = copy.deepcopy(dataRight) # reset the data before mutating the next parameter\n\n getLog(way,\"<<<<<< required-parameter test >>>>>>:\")\n for parameter in dict_values[\"parameters\"]:\n if parameter['in'] != 'header':\n API.data.pop(parameter['name'])\n res = transHttp(API, api_methods)\n if parameter['required'] == True:\n if res['statusCode'] == status[\"missarg\"]:\n getLog(way,\"pass -- \"+parameter[\"name\"] + \" is required, tested with it missing\")\n else:\n getLog(way,\"fail -- \"+parameter[\"name\"] + \" is required, tested with it missing\",\n geShi[\"tab\"]+\"error response: \" + json.dumps(res))\n getLog(way,geShi[\"tab\"]+\"request data: \" + str(API.data))\n else:\n if res['statusCode'] == status[\"success\"]:\n getLog(way,\"pass -- \"+parameter[\"name\"] + \" is optional, tested with it missing\")\n else:\n getLog(way,\"fail -- \"+parameter[\"name\"] + \" is optional, tested with it missing\",\n geShi[\"tab\"]+\"error response: \" + json.dumps(res))\n getLog(way,geShi[\"tab\"]+\"request data: \" + str(API.data))\n API.data = copy.deepcopy(dataRight) # reset the data before mutating the next parameter\n for parameter in dict_values[\"parameters\"]:\n if parameter['in'] != 'header':\n if parameter['required'] == False:\n API.data.pop(parameter['name'])\n res = transHttp(API, api_methods)\n if res['statusCode'] != status[\"success\"]:\n getLog(way,\"fail -- all-optional-parameters-missing test:\",\n \"error response: \" + json.dumps(res))\n getLog(way,\"request data: \" + str(API.data))\n API.data = copy.deepcopy(dataRight) # reset the data before mutating the next parameter\n\n getLog(way, \"<<<<<< special-character sweep test >>>>>>\") # only runs for string-typed parameters; the previously missing 'way' argument is now passed\n for parameter in dict_values[\"parameters\"]:\n tag = 0 # flag: stays 0 if every character passes\n if parameter['in'] != 'header':\n if parameter['type'] ==\"string\":\n for c in special:\n API.data[parameter['name']] = c\n res = transHttp(API, api_methods)\n if res['statusCode'] != status[\"success\"]:\n tag = 1\n getLog(\n way,\"fail -- \"+parameter[\"name\"] + \" special character <\"+c + \"> test\",\n geShi[\"tab\"]+\"error response: \" + json.dumps(res))\n getLog(way,geShi[\"tab\"]+\"request data: \" + str(API.data))\n if tag == 0:\n getLog(way,\"pass -- \"+parameter[\"name\"] + \" special-character test\")\n API.data = copy.deepcopy(dataRight) # reset the data before mutating the next parameter\n\n getLog(way, \"<<<<<< response-time test >>>>>>\") # reports the slowest call recorded above; the previously missing 'way' argument is now passed\n if maxtime > limiTime:\n getLog(\n way,\"fail -- slowest response took \"+str(maxtime)+\"ms, above the limit of \"+str(limiTime)+\"ms\",\n geShi[\"tab\"] + \"request data: \" + json.dumps(resdict))\n else:\n getLog(way,\"pass -- slowest response took \"+str(maxtime)+\"ms, under the limit of \"+str(limiTime)+\"ms\")\n\n\n\n\n\n\n\n", "repo_name": "bingosong17/test1", "sub_path": "autotestServer/code/spider.py", "file_name": "spider.py", "file_ext": "py", "file_size_in_byte": 10827, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "random.choice", "line_number": 24, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 85, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 99, "usage_type": "call"}, {"api_name": "optparse.OptionParser", "line_number": 118, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 136, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 137, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 165, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 171, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 184, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 186, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 198, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 205, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 207, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 215, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 217, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 231,
"usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 235, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 241, "usage_type": "call"}]} {"seq_id": "2232916424", "text": "import talib\nimport numpy as np\nfrom binance.client import Client\nimport time\nfrom tradingview_ta import TA_Handler, Interval, Exchange\ntesla = TA_Handler(\n symbol=\"TSLA\",\n screener=\"america\",\n exchange=\"NASDAQ\",\n interval=Interval.INTERVAL_1_MINUTE\n)\n\napi_key = ''\napi_secret = ''\n\nclient = Client(api_key, api_secret, testnet=True)\n\nsymbol = 'BTCUSDT'\nquantity = 0.001\n\ndef get_indicators(symbol):\n candles = client.get_klines(symbol=symbol, interval=Client.KLINE_INTERVAL_1MINUTE)\n print(candles)\n closes = np.array([float(candle[4]) for candle in candles])\n rsi = talib.RSI(closes, timeperiod=14)\n adx = talib.ADX(closes, closes, closes, timeperiod=14)\n upper, middle, lower = talib.BBANDS(closes, timeperiod=20, nbdevup=2, nbdevdn=2, matype=0)\n return rsi[-1], adx[-1], closes[-1], upper[-1], middle[-1], lower[-1]\n\ndef place_order(symbol, side, quantity):\n try:\n order = client.create_order(symbol=symbol, side=side, type=Client.ORDER_TYPE_MARKET, quantity=quantity)\n print(order)\n except Exception as e:\n print(e)\n\nwhile True:\n try:\n rsi, adx, close, upper, middle, lower = get_indicators(symbol)\n print(\"rsi:\", rsi)\n print(\"adx:\", adx)\n print(\"close:\", close)\n print(\"upper:\", upper)\n print(\"middle:\", middle)\n print(\"lower:\", lower)\n if rsi < 30 and adx > 25 and close < lower:\n print('Placing buy order')\n place_order(symbol, Client.SIDE_BUY, quantity)\n elif rsi > 70 and adx > 25 and close > upper:\n print('Placing sell order')\n place_order(symbol, Client.SIDE_SELL, quantity)\n except Exception as e:\n print(e)\n\n time.sleep(60)", "repo_name": "RezaHedayatkhah/CryptoTrader", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1721, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "tradingview_ta.TA_Handler", "line_number": 6, "usage_type": "call"}, {"api_name": "tradingview_ta.Interval.INTERVAL_1_MINUTE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tradingview_ta.Interval", "line_number": 10, "usage_type": "name"}, {"api_name": "binance.client.Client", "line_number": 16, "usage_type": "call"}, {"api_name": "binance.client.Client.KLINE_INTERVAL_1MINUTE", "line_number": 22, "usage_type": "attribute"}, {"api_name": "binance.client.Client", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "talib.RSI", "line_number": 25, "usage_type": "call"}, {"api_name": "talib.ADX", "line_number": 26, "usage_type": "call"}, {"api_name": "talib.BBANDS", "line_number": 27, "usage_type": "call"}, {"api_name": "binance.client.Client.ORDER_TYPE_MARKET", "line_number": 32, "usage_type": "attribute"}, {"api_name": "binance.client.Client", "line_number": 32, "usage_type": "name"}, {"api_name": "binance.client.Client.SIDE_BUY", "line_number": 48, "usage_type": "attribute"}, {"api_name": "binance.client.Client", "line_number": 48, "usage_type": "name"}, {"api_name": "binance.client.Client.SIDE_SELL", "line_number": 51, "usage_type": "attribute"}, {"api_name": "binance.client.Client", "line_number": 51, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}]} {"seq_id": "73100410043", "text": "#!/usr/bin/env 
ipy\n\n##############################################################################\n# Written by: Calen Chen \n# Ray Wang \n# Date: 03/11/2008\n# Description: This is a test application sample for winforms control:\n# TextBox\n##############################################################################\n\nimport clr\n\nclr.AddReference('System.Windows.Forms')\nclr.AddReference('System.Drawing')\n\nfrom System.Drawing import *\nfrom System.Windows.Forms import *\n\n\nclass TextBoxApp(Form):\n\n def __init__(self):\n self.Text = \"TextBox Control\"\n self.Height = 335\n\n self.label1 = Label()\n self.label1.Text = \"Normal TextBox\"\n self.label1.AccessibleName = \"explicitly set name for label\"\n self.label1.Dock = DockStyle.Bottom\n \n self.textbox1 = TextBox()\n self.textbox1.AcceptsTab = True\n self.textbox1.AcceptsReturn = True\n self.textbox1.Dock = DockStyle.Bottom\n self.textbox1.Name = \"self.textbox1\"\n self.textbox1.AccessibleName = \"explicitly set name for textbox\"\n self.textbox1.TextChanged += self.textbox1_enter\n\n # create a password entry textbox to display asterisks \n # instead of the text typed\n self.label2 = Label()\n self.label2.Text = \"Multi-Line TextBox\"\n self.label2.Dock = DockStyle.Bottom\n self.label2.Height = 80\n\n self.textbox2 = TextBox()\n self.textbox2.Dock = DockStyle.Bottom\n self.textbox2.Height = 100\n self.textbox2.Multiline = True\n self.textbox2.ScrollBars = ScrollBars.Both\n self.textbox2.AcceptsTab = True\n self.textbox2.AcceptsReturn = True\n self.textbox2.WordWrap = False\n self.textbox2.TextChanged += self.textbox2_enter\n\n self.label3 = Label()\n self.label3.Text = \"Password TextBox\"\n self.label3.Dock = DockStyle.Bottom\n\n self.textbox3 = TextBox()\n self.textbox3.Dock = DockStyle.Bottom\n self.textbox3.UseSystemPasswordChar = True\n self.textbox3.TextChanged += self.textbox3_enter\n\n self.label4 = Label()\n self.label4.Text = \"non-Editable TextBox\"\n self.label4.Dock = DockStyle.Bottom\n\n self.textbox4 = TextBox()\n self.textbox4.Enabled = False\n self.textbox4.Dock = DockStyle.Bottom\n\n self.Controls.Add(self.label1)\n self.Controls.Add(self.textbox1)\n self.Controls.Add(self.label2)\n self.Controls.Add(self.textbox2)\n self.Controls.Add(self.label3)\n self.Controls.Add(self.textbox3)\n self.Controls.Add(self.label4)\n self.Controls.Add(self.textbox4)\n\n def textbox1_enter(self, sender, event):\n self.label1.Text = self.textbox1.Text\n\n def textbox2_enter(self, sender, event):\n self.label2.Text = self.textbox2.Text\n\n def textbox3_enter(self, sender, event):\n self.label3.Text = self.textbox3.Text\n\nform = TextBoxApp()\nApplication.Run(form)\n", "repo_name": "mono/uia2atk", "sub_path": "test/samples/winforms/textbox.py", "file_name": "textbox.py", "file_ext": "py", "file_size_in_byte": 3022, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "41", "api": [{"api_name": "clr.AddReference", "line_number": 13, "usage_type": "call"}, {"api_name": "clr.AddReference", "line_number": 14, "usage_type": "call"}]} {"seq_id": "42761456839", "text": "from rest_framework import serializers\n\nfrom .models import Group, Video, ImageModel\n\n\nclass ImageModelSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = ImageModel\n fields = '__all__'\n\n def update(self, instance, validated_data):\n # Delete old image file before setting new one\n if instance.image:\n instance.image.delete()\n return super().update(instance, validated_data)\n\n\nclass 
VideoReadSerializer(serializers.ModelSerializer):\n aliases = serializers.ListField(source='get_aliases')\n links = serializers.ListField(source='get_links')\n\n class Meta:\n model = Video\n fields = '__all__'\n extra_kwargs = {\n 'alias': {'write_only': True},\n 'links_arr': {'write_only': True},\n }\n\n\nclass VideoWriteSerializer(serializers.ModelSerializer):\n aliases = serializers.ListField()\n links = serializers.ListField()\n\n class Meta:\n model = Video\n fields = '__all__'\n extra_kwargs = {\n 'alias': {'read_only': True},\n 'links_arr': {'read_only': True},\n }\n\n def validate(self, attrs):\n attrs['alias'] = Group.build_alias(attrs.pop('aliases'))\n attrs['links_arr'] = Group.build_links(attrs.pop('links'))\n return attrs\n\n def update(self, instance, validated_data):\n old_order = instance.order\n new_order = validated_data[\"order\"]\n if old_order != new_order:\n instance.reorder(old_order, new_order)\n return super().update(instance, validated_data)\n\n def create(self, validated_data):\n instance = super().create(validated_data)\n instance.created()\n return instance\n\n def to_representation(self, instance):\n serializer = GroupReadSerializer(instance=instance.group, context=self.context)\n return serializer.data\n\n\nclass GroupReadSerializer(serializers.ModelSerializer):\n videos = VideoReadSerializer(many=True)\n images = ImageModelSerializer(many=True)\n aliases = serializers.ListField(source='get_aliases')\n links = serializers.ListField(source='get_links')\n\n class Meta:\n model = Group\n fields = '__all__'\n extra_kwargs = {\n 'alias': {'write_only': True},\n 'links_arr': {'write_only': True},\n }\n\n\nclass GroupWriteSerializer(serializers.ModelSerializer):\n aliases = serializers.ListField()\n links = serializers.ListField()\n\n class Meta:\n model = Group\n fields = '__all__'\n extra_kwargs = {\n 'alias': {'read_only': True},\n 'links_arr': {'read_only': True},\n }\n\n def validate(self, attrs):\n attrs['alias'] = Group.build_alias(attrs.pop('aliases'))\n attrs['links_arr'] = Group.build_links(attrs.pop('links'))\n return attrs\n\n def to_representation(self, instance):\n serializer = GroupReadSerializer(instance, context=self.context)\n return serializer.data\n", "repo_name": "Mefodii/ReganamKevolehc_BE", "sub_path": "watching/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 2984, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 6, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 6, "usage_type": "name"}, {"api_name": "models.ImageModel", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 19, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 19, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ListField", "line_number": 20, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ListField", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 21, "usage_type": "name"}, {"api_name": "models.Video", "line_number": 24, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 32, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", 
"line_number": 32, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ListField", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 33, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ListField", "line_number": 34, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 34, "usage_type": "name"}, {"api_name": "models.Video", "line_number": 37, "usage_type": "name"}, {"api_name": "models.Group.build_alias", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 45, "usage_type": "name"}, {"api_name": "models.Group.build_links", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 46, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 66, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 66, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ListField", "line_number": 69, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 69, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ListField", "line_number": 70, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 70, "usage_type": "name"}, {"api_name": "models.Group", "line_number": 73, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 81, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 81, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ListField", "line_number": 82, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 82, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ListField", "line_number": 83, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 83, "usage_type": "name"}, {"api_name": "models.Group", "line_number": 86, "usage_type": "name"}, {"api_name": "models.Group.build_alias", "line_number": 94, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 94, "usage_type": "name"}, {"api_name": "models.Group.build_links", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 95, "usage_type": "name"}]} {"seq_id": "28843383510", "text": "\"\"\"\n@file\n@brief Helpers to convert docstring to various format.\n\"\"\"\nimport os\nimport sys\nfrom collections import deque\nimport warnings\nimport pickle\nimport platform\nfrom html import escape as htmlescape\nfrom io import StringIO\nfrom docutils.parsers.rst import roles\nfrom docutils.languages import en as docutils_en\nfrom docutils import nodes\nfrom docutils.utils import Reporter\nfrom sphinx.application import Sphinx\nfrom sphinx.environment import BuildEnvironment\nfrom sphinx.errors import ExtensionError\nfrom sphinx.ext.extlinks import setup_link_roles\nfrom sphinx.transforms import SphinxTransformer\nfrom sphinx.writers.html import HTMLWriter\nfrom sphinx.util.build_phase import BuildPhase\nfrom sphinx.util.logging import prefixed_warnings\nfrom sphinx.project import Project\nfrom sphinx.errors import ApplicationError\nfrom sphinx.util.logging import getLogger\nfrom ..sphinxext.sphinx_doctree_builder import (\n DocTreeBuilder, DocTreeWriter, DocTreeTranslator)\nfrom ..sphinxext.sphinx_md_builder import MdBuilder, MdWriter, MdTranslator\nfrom ..sphinxext.sphinx_latex_builder import (\n EnhancedLaTeXBuilder, EnhancedLaTeXWriter, 
EnhancedLaTeXTranslator)\nfrom ..sphinxext.sphinx_rst_builder import RstBuilder, RstWriter, RstTranslator\nfrom ._single_file_html_builder import CustomSingleFileHTMLBuilder\n\n\ndef _get_LaTeXTranslator():\n try:\n from sphinx.writers.latex import LaTeXTranslator\n except ImportError: # pragma: no cover\n # Since sphinx 1.7.3 (circular reference).\n import sphinx.builders.latex.transforms\n from sphinx.writers.latex import LaTeXTranslator\n return LaTeXTranslator\n\n\ntry:\n from sphinx.util.docutils import is_html5_writer_available\nexcept ImportError:\n def is_html5_writer_available():\n return True\n\nif is_html5_writer_available():\n from sphinx.writers.html5 import HTML5Translator as HTMLTranslator\nelse:\n from sphinx.writers.html import HTMLTranslator # pragma: no cover\n\n\ndef update_docutils_languages(values=None):\n \"\"\"\n Updates ``docutils/languages/en.py`` with missing labels.\n It does it for language *en*.\n\n @param values consider values in this dictionary first\n \"\"\"\n if values is None:\n values = dict()\n lab = docutils_en.labels\n if 'versionmodified' not in lab:\n lab['versionmodified'] = values.get(\n 'versionmodified', 'modified version')\n if 'desc' not in lab:\n lab['desc'] = values.get('desc', 'description')\n\n\nclass _AdditionalVisitDepart:\n \"\"\"\n Additional visitors and departors.\n \"\"\"\n\n def __init__(self, output_format):\n self.output_format = output_format\n\n def is_html(self):\n \"\"\"\n Tells if the translator is :epkg:`html` format.\n \"\"\"\n return self.base_class is HTMLTranslator\n\n def is_rst(self):\n \"\"\"\n Tells if the translator is :epkg:`rst` format.\n \"\"\"\n return self.base_class is RstTranslator\n\n def is_latex(self):\n \"\"\"\n Tells if the translator is :epkg:`latex` format.\n \"\"\"\n return self.base_class is _get_LaTeXTranslator()\n\n def is_md(self):\n \"\"\"\n Tells if the translator is :epkg:`markdown` format.\n \"\"\"\n return self.base_class is MdTranslator # fixed: was compared against the LaTeX translator\n\n def is_doctree(self):\n \"\"\"\n Tells if the translator is doctree format.\n \"\"\"\n return self.base_class is DocTreeTranslator # fixed: was compared against the LaTeX translator\n\n def add_secnumber(self, node):\n \"\"\"\n Overwrites this method to catch errors raised when\n a single document is being processed.\n \"\"\"\n if node.get('secnumber'):\n self.base_class.add_secnumber(self, node)\n elif len(node.parent['ids']) > 0:\n self.base_class.add_secnumber(self, node)\n else:\n n = len(self.builder.secnumbers)\n node.parent['ids'].append(\"custom_label_%d\" % n)\n self.base_class.add_secnumber(self, node)\n\n def eval_expr(self, expr):\n rst = self.output_format == 'rst'\n latex = self.output_format in ('latex', 'elatex')\n texinfo = [('index', 'A_AdditionalVisitDepart', 'B_AdditionalVisitDepart', # pylint: disable=W0612\n 'C_AdditionalVisitDepart', 'D_AdditionalVisitDepart',\n 'E_AdditionalVisitDepart', 'Miscellaneous')]\n html = self.output_format == 'html'\n md = self.output_format == 'md'\n doctree = self.output_format in ('doctree', 'doctree.txt')\n if not (rst or html or latex or md or doctree):\n raise ValueError( # pragma: no cover\n f\"Unknown output format '{self.output_format}'.\")\n try:\n ev = eval(expr)\n except Exception: # pragma: no cover\n raise ValueError(\n f\"Unable to interpret expression '{expr}'\")\n return ev\n\n def visit_only(self, node):\n ev = self.eval_expr(node.attributes['expr'])\n if ev:\n pass\n else:\n raise nodes.SkipNode\n\n def depart_only(self, node):\n ev = self.eval_expr(node.attributes['expr'])\n if ev:\n pass\n else:\n # The program
should not necessarily be here.\n pass\n\n def visit_viewcode_anchor(self, node):\n # Removed in sphinx 3.5\n pass\n\n def depart_viewcode_anchor(self, node):\n # Removed in sphinx 3.5\n pass\n\n def unknown_visit(self, node): # pragma: no cover\n raise NotImplementedError(\n \"[_AdditionalVisitDepart] Unknown node: '{0}' in '{1}'\".format(\n node.__class__.__name__, self.__class__.__name__))\n\n\nclass HTMLTranslatorWithCustomDirectives(_AdditionalVisitDepart, HTMLTranslator):\n \"\"\"\n See @see cl HTMLWriterWithCustomDirectives.\n \"\"\"\n\n def __init__(self, document, builder, *args, **kwds):\n HTMLTranslator.__init__(self, document, builder, *args, **kwds)\n _AdditionalVisitDepart.__init__(self, 'html')\n nodes_list = getattr(builder, '_function_node', None)\n if nodes_list is not None:\n for name, f1, f2 in nodes_list:\n setattr(self.__class__, \"visit_\" + name, f1)\n setattr(self.__class__, \"depart_\" + name, f2)\n self.base_class = HTMLTranslator\n\n def visit_field(self, node):\n if not hasattr(self, '_fieldlist_row_index'):\n # needed when a docstring starts with :param:\n self._fieldlist_row_index = 0\n return HTMLTranslator.visit_field(self, node)\n\n def visit_pending_xref(self, node):\n self.visit_Text(node)\n raise nodes.SkipNode\n\n def unknown_visit(self, node): # pragma: no cover\n raise NotImplementedError(\"[HTMLTranslatorWithCustomDirectives] Unknown node: '{0}' in '{1}'\".format(\n node.__class__.__name__, self.__class__.__name__))\n\n\nclass RSTTranslatorWithCustomDirectives(_AdditionalVisitDepart, RstTranslator):\n \"\"\"\n See @see cl HTMLWriterWithCustomDirectives.\n \"\"\"\n\n def __init__(self, document, builder, *args, **kwds):\n \"\"\"\n constructor\n \"\"\"\n RstTranslator.__init__(self, document, builder, *args, **kwds)\n _AdditionalVisitDepart.__init__(self, 'rst')\n for name, f1, f2 in builder._function_node:\n setattr(self.__class__, \"visit_\" + name, f1)\n setattr(self.__class__, \"depart_\" + name, f2)\n self.base_class = RstTranslator\n\n\nclass MDTranslatorWithCustomDirectives(_AdditionalVisitDepart, MdTranslator):\n \"\"\"\n See @see cl HTMLWriterWithCustomDirectives.\n \"\"\"\n\n def __init__(self, document, builder, *args, **kwds):\n \"\"\"\n constructor\n \"\"\"\n MdTranslator.__init__(self, document, builder, *args, **kwds)\n _AdditionalVisitDepart.__init__(self, 'md')\n for name, f1, f2 in builder._function_node:\n setattr(self.__class__, \"visit_\" + name, f1)\n setattr(self.__class__, \"depart_\" + name, f2)\n self.base_class = MdTranslator\n\n\nclass DocTreeTranslatorWithCustomDirectives(DocTreeTranslator):\n \"\"\"\n See @see cl HTMLWriterWithCustomDirectives.\n \"\"\"\n\n def __init__(self, document, builder, *args, **kwds):\n \"\"\"\n constructor\n \"\"\"\n DocTreeTranslator.__init__(self, document, builder, *args, **kwds)\n self.base_class = DocTreeTranslator\n\n\nclass LatexTranslatorWithCustomDirectives(_AdditionalVisitDepart, EnhancedLaTeXTranslator):\n \"\"\"\n See @see cl LatexWriterWithCustomDirectives.\n \"\"\"\n\n def __init__(self, document, builder, *args, **kwds):\n \"\"\"\n constructor\n \"\"\"\n if not hasattr(builder, \"config\"):\n builder, document = document, builder\n if not hasattr(builder, \"config\"):\n raise TypeError( # pragma: no cover\n f\"Builder has no config: {type(builder)} - {type(document)}\")\n EnhancedLaTeXTranslator.__init__(\n self, document, builder, *args, **kwds)\n _AdditionalVisitDepart.__init__(self, 'md')\n for name, f1, f2 in builder._function_node:\n setattr(self.__class__, \"visit_\" + name, 
f1)\n setattr(self.__class__, \"depart_\" + name, f2)\n self.base_class = EnhancedLaTeXTranslator\n\n\nclass _WriterWithCustomDirectives:\n \"\"\"\n Common class to @see cl HTMLWriterWithCustomDirectives and @see cl RSTWriterWithCustomDirectives.\n \"\"\"\n\n def _init(self, base_class, translator_class, app=None):\n \"\"\"\n @param base_class base class\n @param app Sphinx application\n \"\"\"\n if app is None:\n self.app = _CustomSphinx(srcdir=None, confdir=None, outdir=None, doctreedir=None,\n buildername='memoryhtml')\n else:\n self.app = app\n builder = self.app.builder\n builder.fignumbers = {}\n base_class.__init__(self, builder)\n self.translator_class = translator_class\n self.builder.secnumbers = {}\n self.builder._function_node = []\n self.builder.current_docname = None\n self.base_class = base_class\n\n def connect_directive_node(self, name, f_visit, f_depart):\n \"\"\"\n Adds custom node to the translator.\n\n @param name name of the directive\n @param f_visit visit function\n @param f_depart depart function\n \"\"\"\n if self.builder.format != \"doctree\":\n self.builder._function_node.append((name, f_visit, f_depart))\n\n def add_configuration_options(self, new_options):\n \"\"\"\n Add new options.\n\n @param new_options new options\n \"\"\"\n for k, v in new_options.items():\n self.builder.config.values[k] = v\n\n def write(self, document, destination):\n \"\"\"\n Processes a document into its final form.\n Translates `document` (a Docutils document tree) into the Writer's\n native format, and write it out to its `destination` (a\n `docutils.io.Output` subclass object).\n\n Normally not overridden or extended in subclasses.\n \"\"\"\n self.base_class.write(self, document, destination)\n\n\nclass HTMLWriterWithCustomDirectives(_WriterWithCustomDirectives, HTMLWriter):\n \"\"\"\n This :epkg:`docutils` writer extends the HTML writer with\n custom directives implemented in this module,\n @see cl RunPythonDirective, @see cl BlogPostDirective.\n\n See `Write your own ReStructuredText-Writer `_.\n\n This class needs to tell :epkg:`docutils` to call the added function\n when directives *runpython* or *blogpost* are met.\n \"\"\"\n\n def __init__(self, builder=None, app=None): # pylint: disable=W0231\n \"\"\"\n @param builder builder\n @param app Sphinx application\n \"\"\"\n _WriterWithCustomDirectives._init(\n self, HTMLWriter, HTMLTranslatorWithCustomDirectives, app)\n\n def translate(self):\n self.visitor = visitor = self.translator_class(\n self.document, self.builder)\n self.document.walkabout(visitor)\n self.output = visitor.astext()\n for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix',\n 'body_pre_docinfo', 'docinfo', 'body', 'fragment',\n 'body_suffix', 'meta', 'title', 'subtitle', 'header',\n 'footer', 'html_prolog', 'html_head', 'html_title',\n 'html_subtitle', 'html_body', ):\n setattr(self, attr, getattr(visitor, attr, None))\n self.clean_meta = ''.join(visitor.meta[2:])\n\n\nclass RSTWriterWithCustomDirectives(_WriterWithCustomDirectives, RstWriter):\n \"\"\"\n This :epkg:`docutils` writer extends the :epkg:`RST` writer with\n custom directives implemented in this module.\n \"\"\"\n\n def __init__(self, builder=None, app=None): # pylint: disable=W0231\n \"\"\"\n @param builder builder\n @param app Sphinx application\n \"\"\"\n _WriterWithCustomDirectives._init(\n self, RstWriter, RSTTranslatorWithCustomDirectives, app)\n\n def translate(self):\n visitor = self.translator_class(self.document, self.builder)\n self.document.walkabout(visitor)\n self.output = 
visitor.body\n\n\nclass MDWriterWithCustomDirectives(_WriterWithCustomDirectives, MdWriter):\n \"\"\"\n This :epkg:`docutils` writer extends the :epkg:`MD` writer with\n custom directives implemented in this module.\n \"\"\"\n\n def __init__(self, builder=None, app=None): # pylint: disable=W0231\n \"\"\"\n @param builder builder\n @param app Sphinx application\n \"\"\"\n _WriterWithCustomDirectives._init(\n self, MdWriter, MDTranslatorWithCustomDirectives, app)\n\n def translate(self):\n visitor = self.translator_class(self.document, self.builder)\n self.document.walkabout(visitor)\n self.output = visitor.body\n\n\nclass DocTreeWriterWithCustomDirectives(_WriterWithCustomDirectives, DocTreeWriter):\n \"\"\"\n This :epkg:`docutils` writer creates a doctree writer with\n custom directives implemented in this module.\n \"\"\"\n\n def __init__(self, builder=None, app=None): # pylint: disable=W0231\n \"\"\"\n @param builder builder\n @param app Sphinx application\n \"\"\"\n _WriterWithCustomDirectives._init(\n self, DocTreeWriter, DocTreeTranslatorWithCustomDirectives, app)\n\n def translate(self):\n visitor = self.translator_class(self.document, self.builder)\n self.document.walkabout(visitor)\n self.output = visitor.body\n\n\nclass LatexWriterWithCustomDirectives(_WriterWithCustomDirectives, EnhancedLaTeXWriter):\n \"\"\"\n This :epkg:`docutils` writer extends the :epkg:`Latex` writer with\n custom directives implemented in this module.\n \"\"\"\n\n def __init__(self, builder=None, app=None): # pylint: disable=W0231\n \"\"\"\n @param builder builder\n @param app Sphinx application\n \"\"\"\n _WriterWithCustomDirectives._init(\n self, EnhancedLaTeXWriter, LatexTranslatorWithCustomDirectives, app)\n if not hasattr(self.builder, \"config\"):\n raise TypeError( # pragma: no cover\n f\"Builder has no config: {type(self.builder)}\")\n\n def translate(self):\n if not hasattr(self.builder, \"config\"):\n raise TypeError( # pragma: no cover\n f\"Builder has no config: {type(self.builder)}\")\n # The instruction\n # visitor = self.builder.create_translator(self.document, self.builder)\n # automatically adds methods visit_ and depart_ for translator\n # based on the list of registered extensions. 
Might be worth using it.\n theme = self.builder.themes.get('manual')\n if theme is None:\n raise RuntimeError( # pragma: no cover\n \"theme cannot be None.\")\n visitor = self.translator_class(\n self.document, self.builder, theme=theme)\n self.document.walkabout(visitor)\n self.output = visitor.body\n\n\nclass _MemoryBuilder:\n \"\"\"\n Builds :epkg:`HTML` output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n \"\"\"\n\n def _init(self, base_class, app, env=None):\n \"\"\"\n Constructs the builder.\n Most of the parameter are static members of the class and cannot\n be overwritten (yet).\n\n :param base_class: base builder class\n :param app: :epkg:`Sphinx application`\n :param env: Environment\n \"\"\"\n if \"IMPOSSIBLE:TOFIND\" in app.srcdir:\n import sphinx.util.osutil\n from .conf_path_tools import custom_ensuredir\n sphinx.util.osutil.ensuredir = custom_ensuredir\n sphinx.builders.ensuredir = custom_ensuredir\n\n try:\n base_class.__init__(self, app=app, env=env)\n except TypeError:\n # older version of sphinx\n base_class.__init__(self, app=app)\n self.built_pages = {}\n self.base_class = base_class\n\n def iter_pages(self):\n \"\"\"\n Enumerate created pages.\n\n @return iterator on tuple(name, content)\n \"\"\"\n for k, v in self.built_pages.items():\n yield k, v.getvalue()\n\n def create_translator(self, *args):\n \"\"\"\n Returns an instance of translator.\n This method returns an instance of ``default_translator_class`` by default.\n Users can replace the translator class with ``app.set_translator()`` API.\n \"\"\"\n translator_class = self.translator_class\n return translator_class(*args)\n\n def _write_serial(self, docnames):\n \"\"\"\n Overwrites *_write_serial* to avoid writing on disk.\n \"\"\"\n from sphinx.util.logging import pending_warnings\n try:\n from sphinx.util.display import status_iterator\n except ImportError:\n from sphinx.util import status_iterator\n with pending_warnings():\n for docname in status_iterator(docnames, 'writing output... ', \"darkgreen\",\n len(docnames), self.app.verbosity):\n doctree = self.env.get_and_resolve_doctree(docname, self)\n self.write_doc_serialized(docname, doctree)\n self.write_doc(docname, doctree)\n\n def _write_parallel(self, docnames, nproc):\n \"\"\"\n Not supported.\n \"\"\"\n raise NotImplementedError(\n \"Use parallel=0 when creating the sphinx application.\")\n\n def assemble_doctree(self, *args, **kwargs):\n \"\"\"\n Overwrites *assemble_doctree* to control the doctree.\n \"\"\"\n from sphinx.util.nodes import inline_all_toctrees\n from sphinx.util.console import darkgreen\n master = self.config.master_doc\n if hasattr(self, \"doctree_\"):\n tree = self.doctree_\n else:\n raise AttributeError( # pragma: no cover\n \"Attribute 'doctree_' is not present. 
Call method finalize().\")\n tree = inline_all_toctrees(\n self, set(), master, tree, darkgreen, [master])\n tree['docname'] = master\n self.env.resolve_references(tree, master, self)\n self.fix_refuris(tree)\n return tree\n\n def fix_refuris(self, tree):\n \"\"\"\n Overwrites *fix_refuris* to control the reference names.\n \"\"\"\n fname = \"__\" + self.config.master_doc + \"__\"\n for refnode in tree.traverse(nodes.reference):\n if 'refuri' not in refnode:\n continue\n refuri = refnode['refuri']\n hashindex = refuri.find('#')\n if hashindex < 0:\n continue\n hashindex = refuri.find('#', hashindex + 1)\n if hashindex >= 0:\n refnode['refuri'] = fname + refuri[hashindex:]\n\n def get_target_uri(self, docname, typ=None):\n \"\"\"\n Overwrites *get_target_uri* to control the page name.\n \"\"\"\n if docname in self.env.all_docs:\n # all references are on the same page...\n return self.config.master_doc + '#document-' + docname\n elif docname in (\"genindex\", \"search\"):\n return self.config.master_doc + '-#' + docname\n else:\n docs = \", \".join( # pragma: no cover\n sorted(f\"'{_}'\" for _ in self.env.all_docs))\n raise ValueError( # pragma: no cover\n f\"docname='{docname}' should be in 'self.env.all_docs' which contains:\\n{docs}\")\n\n def get_outfilename(self, pagename):\n \"\"\"\n Overwrites *get_target_uri* to control file names.\n \"\"\"\n return f\"{self.outdir}/{pagename}.m.html\".replace(\"\\\\\", \"/\")\n\n def handle_page(self, pagename, addctx, templatename='page.html',\n outfilename=None, event_arg=None):\n \"\"\"\n Overrides *handle_page* to write into stream instead of files.\n \"\"\"\n from sphinx.util.osutil import relative_uri\n ctx = self.globalcontext.copy()\n if hasattr(self, \"warning\"):\n ctx['warn'] = self.warning\n elif hasattr(self, \"warn\"):\n ctx['warn'] = self.warn\n # current_page_name is backwards compatibility\n ctx['pagename'] = ctx['current_page_name'] = pagename\n ctx['encoding'] = self.config.html_output_encoding\n default_baseuri = self.get_target_uri(pagename)\n # in the singlehtml builder, default_baseuri still contains an #anchor\n # part, which relative_uri doesn't really like...\n default_baseuri = default_baseuri.rsplit('#', 1)[0]\n\n def pathto(otheruri, resource=False, baseuri=default_baseuri):\n if resource and '://' in otheruri:\n # allow non-local resources given by scheme\n return otheruri\n elif not resource:\n otheruri = self.get_target_uri(otheruri)\n uri = relative_uri(baseuri, otheruri) or '#'\n if uri == '#' and not self.allow_sharp_as_current_path:\n uri = baseuri\n return uri\n ctx['pathto'] = pathto\n\n def css_tag(css):\n attrs = []\n for key in sorted(css.attributes):\n value = css.attributes[key]\n if value is not None:\n attrs.append('%s=\"%s\"' % (key, htmlescape( # pylint: disable=W1505\n value, True))) # pylint: disable=W1505\n attrs.append(f'href=\"{pathto(css.filename, resource=True)}\"')\n return f\"\"\n ctx['css_tag'] = css_tag\n\n def hasdoc(name):\n if name in self.env.all_docs:\n return True\n elif name == 'search' and self.search:\n return True\n elif name == 'genindex' and self.get_builder_config('use_index', 'html'):\n return True\n return False\n ctx['hasdoc'] = hasdoc\n\n ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw)\n self.add_sidebars(pagename, ctx)\n ctx.update(addctx)\n\n self.update_page_context(pagename, templatename, ctx, event_arg)\n newtmpl = self.app.emit_firstresult('html-page-context', pagename,\n templatename, ctx, event_arg)\n if newtmpl:\n templatename = newtmpl\n\n 
try:\n output = self.templates.render(templatename, ctx)\n except UnicodeError: # pragma: no cover\n logger = getLogger(\"MockSphinxApp\")\n logger.warning(\"[_CustomSphinx] A unicode error occurred when rendering the page %s. \"\n \"Please make sure all config values that contain \"\n \"non-ASCII content are Unicode strings.\", pagename)\n return\n\n if not outfilename:\n outfilename = self.get_outfilename(pagename)\n # outfilename's path is in general different from self.outdir\n # ensuredir(path.dirname(outfilename))\n if outfilename not in self.built_pages:\n self.built_pages[outfilename] = StringIO()\n self.built_pages[outfilename].write(output)\n\n\nclass MemoryHTMLBuilder(_MemoryBuilder, CustomSingleFileHTMLBuilder):\n \"\"\"\n Builds :epkg:`HTML` output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n \"\"\"\n name = 'memoryhtml'\n format = 'html'\n out_suffix = None # \".memory.html\"\n supported_image_types = ['application/pdf', 'image/png', 'image/jpeg']\n default_translator_class = HTMLTranslatorWithCustomDirectives\n translator_class = HTMLTranslatorWithCustomDirectives\n _writer_class = HTMLWriterWithCustomDirectives\n supported_remote_images = True\n supported_data_uri_images = True\n html_scaled_image_link = True\n\n def __init__(self, app, env=None): # pylint: disable=W0231\n \"\"\"\n Constructs the builder.\n Most of the parameters are static members of the class and cannot\n be overwritten (yet).\n\n :param app: :epkg:`Sphinx application`\n \"\"\"\n _MemoryBuilder._init(self, CustomSingleFileHTMLBuilder, app, env=env)\n\n\nclass MemoryRSTBuilder(_MemoryBuilder, RstBuilder):\n\n \"\"\"\n Builds :epkg:`RST` output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n The writer simplifies the :epkg:`RST` syntax by replacing\n custom roles into true :epkg:`RST` syntax.\n \"\"\"\n\n name = 'memoryrst'\n format = 'rst'\n out_suffix = None # \".memory.rst\"\n supported_image_types = ['application/pdf', 'image/png', 'image/jpeg']\n default_translator_class = RSTTranslatorWithCustomDirectives\n translator_class = RSTTranslatorWithCustomDirectives\n _writer_class = RSTWriterWithCustomDirectives\n supported_remote_images = True\n supported_data_uri_images = True\n html_scaled_image_link = True\n\n def __init__(self, app, env=None): # pylint: disable=W0231\n \"\"\"\n Constructs the builder.\n Most of the parameters are static members of the class and cannot\n be overwritten (yet).\n\n :param app: :epkg:`Sphinx application`\n \"\"\"\n _MemoryBuilder._init(self, RstBuilder, app, env=env)\n\n def handle_page(self, pagename, addctx, templatename=None,\n outfilename=None, event_arg=None):\n \"\"\"\n Override *handle_page* to write into stream instead of files.\n \"\"\"\n if templatename is not None:\n raise NotImplementedError(\n \"templatename must be None.\") # pragma: no cover\n if not outfilename:\n outfilename = self.get_outfilename(pagename)\n if outfilename not in self.built_pages:\n self.built_pages[outfilename] = StringIO()\n self.built_pages[outfilename].write(self.writer.output)\n\n\nclass MemoryMDBuilder(_MemoryBuilder, MdBuilder):\n \"\"\"\n Builds :epkg:`MD` output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n \"\"\"\n name = 'memorymd'\n format = 'md'\n out_suffix = None # \".memory.md\"\n supported_image_types = ['application/pdf', 'image/png', 'image/jpeg']\n default_translator_class = MDTranslatorWithCustomDirectives\n translator_class = MDTranslatorWithCustomDirectives\n _writer_class = MDWriterWithCustomDirectives\n
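 # Editor note: like the other Memory*Builder classes, output is accumulated in self.built_pages by handle_page rather than written to disk.\n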
supported_remote_images = True\n supported_data_uri_images = True\n html_scaled_image_link = True\n\n def __init__(self, app, env=None): # pylint: disable=W0231\n \"\"\"\n Constructs the builder.\n Most of the parameters are static members of the class and cannot\n be overwritten (yet).\n\n :param app: :epkg:`Sphinx application`\n \"\"\"\n _MemoryBuilder._init(self, MdBuilder, app, env=env)\n\n def handle_page(self, pagename, addctx, templatename=None,\n outfilename=None, event_arg=None):\n \"\"\"\n Override *handle_page* to write into a stream instead of files.\n \"\"\"\n if templatename is not None:\n raise NotImplementedError(\n \"templatename must be None.\") # pragma: no cover\n if not outfilename:\n outfilename = self.get_outfilename(pagename)\n if outfilename not in self.built_pages:\n self.built_pages[outfilename] = StringIO()\n self.built_pages[outfilename].write(self.writer.output)\n\n\nclass MemoryDocTreeBuilder(_MemoryBuilder, DocTreeBuilder):\n \"\"\"\n Builds doctree output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n \"\"\"\n name = 'memorydoctree'\n format = 'doctree'\n out_suffix = None # \".memory.doctree\"\n default_translator_class = DocTreeTranslatorWithCustomDirectives\n translator_class = DocTreeTranslatorWithCustomDirectives\n _writer_class = DocTreeWriterWithCustomDirectives\n supported_remote_images = True\n supported_data_uri_images = True\n html_scaled_image_link = True\n\n def __init__(self, app, env=None): # pylint: disable=W0231\n \"\"\"\n Constructs the builder.\n Most of the parameters are static members of the class and cannot\n be overwritten (yet).\n\n :param app: :epkg:`Sphinx application`\n \"\"\"\n _MemoryBuilder._init(self, DocTreeBuilder, app, env=env)\n\n def handle_page(self, pagename, addctx, templatename=None,\n outfilename=None, event_arg=None):\n \"\"\"\n Override *handle_page* to write into a stream instead of files.\n \"\"\"\n if templatename is not None:\n raise NotImplementedError(\n \"templatename must be None.\") # pragma: no cover\n if not outfilename:\n outfilename = self.get_outfilename(pagename)\n if outfilename not in self.built_pages:\n self.built_pages[outfilename] = StringIO()\n self.built_pages[outfilename].write(self.writer.output)\n\n\nclass MemoryLatexBuilder(_MemoryBuilder, EnhancedLaTeXBuilder):\n \"\"\"\n Builds :epkg:`Latex` output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n \"\"\"\n name = 'memorylatex'\n format = 'tex'\n out_suffix = None # \".memory.tex\"\n supported_image_types = ['image/png', 'image/jpeg', 'image/gif']\n default_translator_class = LatexTranslatorWithCustomDirectives\n translator_class = LatexTranslatorWithCustomDirectives\n _writer_class = LatexWriterWithCustomDirectives\n supported_remote_images = True\n supported_data_uri_images = True\n html_scaled_image_link = True\n\n def __init__(self, app, env=None): # pylint: disable=W0231\n \"\"\"\n Constructs the builder.\n Most of the parameters are static members of the class and cannot\n be overwritten (yet).\n\n :param app: :epkg:`Sphinx application`\n \"\"\"\n _MemoryBuilder._init(self, EnhancedLaTeXBuilder, app, env=env)\n\n def write_stylesheet(self):\n from sphinx.highlighting import PygmentsBridge\n highlighter = PygmentsBridge('latex', self.config.pygments_style)\n rows = []\n rows.append('\\\\NeedsTeXFormat{LaTeX2e}[1995/12/01]\\n')\n rows.append('\\\\ProvidesPackage{sphinxhighlight}')\n rows.append(\n '[2016/05/29 stylesheet for highlighting with pygments]\\n\\n')\n rows.append(highlighter.get_stylesheet())\n 
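# The generated pygments stylesheet is kept with the other in-memory pages\n # under the key 'sphinxhighlight.sty' rather than written next to the\n # LaTeX output on disk.\n 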
self.built_pages['sphinxhighlight.sty'] = StringIO()\n self.built_pages['sphinxhighlight.sty'].write(\"\".join(rows))\n\n class EnhancedStringIO(StringIO):\n def write(self, content):\n if isinstance(content, str):\n StringIO.write(self, content)\n else:\n for line in content:\n StringIO.write(self, line)\n\n def _get_filename(self, targetname, encoding='utf-8', overwrite_if_changed=True):\n if not isinstance(targetname, str):\n raise TypeError( # pragma: no cover\n f\"targetname must be a string: {targetname}\")\n destination = MemoryLatexBuilder.EnhancedStringIO()\n self.built_pages[targetname] = destination\n return destination\n\n\nclass _CustomBuildEnvironment(BuildEnvironment):\n \"\"\"\n Overrides some functionalities of\n `BuildEnvironment `_.\n \"\"\"\n\n def __init__(self, app):\n \"\"\"\n Initializes the environment and adds the in-memory doctree cache.\n \"\"\"\n BuildEnvironment.__init__(self, app)\n self.doctree_ = {}\n\n def get_doctree(self, docname):\n \"\"\"Read the doctree for a file from the pickle and return it.\"\"\"\n if hasattr(self, \"doctree_\") and docname in self.doctree_:\n from sphinx.util.docutils import WarningStream\n doctree = self.doctree_[docname]\n doctree.settings.env = self\n doctree.reporter = Reporter(self.doc2path(\n docname), 2, 5, stream=WarningStream())\n return doctree\n\n if hasattr(self, \"doctree_\"):\n available = list(sorted(self.doctree_))\n if len(available) > 10:\n available = available[:10]\n raise KeyError(\n \"Unable to find entry '{}' (has doctree: {})\\nFirst documents:\\n{}\"\n \"\".format(\n docname, hasattr(self, \"doctree_\"),\n \"\\n\".join(available)))\n\n raise KeyError( # pragma: no cover\n \"Doctree empty or not found for '{}' (has doctree: {})\"\n \"\".format(\n docname, hasattr(self, \"doctree_\")))\n # return BuildEnvironment.get_doctree(self, docname)\n\n def apply_post_transforms(self, doctree, docname):\n \"\"\"Apply all post-transforms.\"\"\"\n # set env.docname during applying post-transforms\n self.temp_data['docname'] = docname\n\n transformer = SphinxTransformer(doctree)\n transformer.set_environment(self)\n transformer.add_transforms(self.app.post_transforms)\n transformer.apply_transforms()\n self.temp_data.clear()\n\n\nclass _CustomSphinx(Sphinx):\n \"\"\"\n Custom :epkg:`Sphinx` application to avoid using disk.\n \"\"\"\n\n def __init__(self, srcdir, confdir, outdir, doctreedir, buildername=\"memoryhtml\", # pylint: disable=W0231\n confoverrides=None, status=None, warning=None,\n freshenv=False, warningiserror=False,\n tags=None, verbosity=0, parallel=0, keep_going=False,\n new_extensions=None):\n '''\n Same constructor as :epkg:`Sphinx application`.\n Additional parameters:\n\n @param new_extensions extensions to add to the application\n\n Some insights about domains:\n\n ::\n\n {'cpp': sphinx.domains.cpp.CPPDomain,\n 'hpp': sphinx.domains.cpp.CPPDomain,\n 'h': sphinx.domains.cpp.CPPDomain,\n 'js': sphinx.domains.javascript.JavaScriptDomain,\n 'std': sphinx.domains.std.StandardDomain,\n 'py': sphinx.domains.python.PythonDomain,\n 'rst': sphinx.domains.rst.ReSTDomain,\n 'c': sphinx.domains.c.CDomain}\n\n And builders:\n\n ::\n\n {'epub': ('epub', 'EpubBuilder'),\n 'singlehtml': ('html', 'SingleFileHTMLBuilder'),\n 'qthelp': ('qthelp', 'QtHelpBuilder'),\n 'epub3': ('epub3', 'Epub3Builder'),\n 'man': ('manpage', 'ManualPageBuilder'),\n 'dummy': ('dummy', 'DummyBuilder'),\n 'json': ('html', 'JSONHTMLBuilder'),\n 'html': ('html', 'StandaloneHTMLBuilder'),\n 'xml': ('xml', 'XMLBuilder'),\n 'texinfo': ('texinfo', 'TexinfoBuilder'),\n 'devhelp': ('devhelp', 'DevhelpBuilder'),\n 
'web': ('html', 'PickleHTMLBuilder'),\n 'pickle': ('html', 'PickleHTMLBuilder'),\n 'htmlhelp': ('htmlhelp', 'HTMLHelpBuilder'),\n 'applehelp': ('applehelp', 'AppleHelpBuilder'),\n 'linkcheck': ('linkcheck', 'CheckExternalLinksBuilder'),\n 'dirhtml': ('html', 'DirectoryHTMLBuilder'),\n 'latex': ('latex', 'LaTeXBuilder'),\n 'elatex': ('latex', 'EnhancedLaTeXBuilder'),\n 'text': ('text', 'TextBuilder'),\n 'changes': ('changes', 'ChangesBuilder'),\n 'websupport': ('websupport', 'WebSupportBuilder'),\n 'gettext': ('gettext', 'MessageCatalogBuilder'),\n 'pseudoxml': ('xml', 'PseudoXMLBuilder'),\n 'rst': ('rst', 'RstBuilder'),\n 'md': ('md', 'MdBuilder'),\n 'doctree': ('doctree', 'DocTreeBuilder')}\n '''\n # own purpose (to monitor)\n self._logger = getLogger(\"_CustomSphinx\")\n self._added_objects = []\n self._added_collectors = []\n\n # from sphinx.domains.cpp import CPPDomain\n # from sphinx.domains.javascript import JavaScriptDomain\n # from sphinx.domains.python import PythonDomain\n # from sphinx.domains.std import StandardDomain\n # from sphinx.domains.rst import ReSTDomain\n # from sphinx.domains.c import CDomain\n\n from sphinx.registry import SphinxComponentRegistry\n self.phase = BuildPhase.INITIALIZATION\n self.verbosity = verbosity\n self.extensions = {}\n self.builder = None\n self.env = None\n self.project = None\n self.registry = SphinxComponentRegistry()\n self.post_transforms = []\n self.pdb = False\n\n if doctreedir is None:\n doctreedir = \"IMPOSSIBLE:TOFIND\"\n if srcdir is None:\n srcdir = \"IMPOSSIBLE:TOFIND\"\n update_docutils_languages()\n\n self.srcdir = os.path.abspath(srcdir)\n self.confdir = os.path.abspath(\n confdir) if confdir is not None else None\n self.outdir = os.path.abspath(outdir) if outdir is not None else None\n self.doctreedir = os.path.abspath(doctreedir)\n self.parallel = parallel\n\n if self.srcdir == self.outdir:\n raise ApplicationError('Source directory and destination ' # pragma: no cover\n 'directory cannot be identical')\n\n if status is None:\n self._status = StringIO()\n self.quiet = True\n else:\n self._status = status\n self.quiet = False\n\n from sphinx.events import EventManager\n # logging.setup(self, self._status, self._warning)\n self.events = EventManager(self)\n\n # keep last few messages for traceback\n # This will be filled by sphinx.util.logging.LastMessagesWriter\n self.messagelog = deque(maxlen=10)\n\n # say hello to the world\n from sphinx import __display_version__\n self.info(f'Running Sphinx v{__display_version__}') # pragma: no cover
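\n\n # Editorial sketch (hypothetical usage, not from the original code): the\n # application is meant to be created entirely in memory, for instance\n # app = _CustomSphinx(srcdir, None, outdir, None,\n # buildername=\"memoryrst\")\n # after a build, app.builder.built_pages holds the produced streams.\n\n # notice for parallel build on macOS and py38+\n if sys.version_info > (3, 8) and platform.system() == 'Darwin' and parallel > 1:\n self._logger.info( # pragma: no cover\n \"For security reason, parallel mode is disabled on macOS and \"\n \"python3.8 and above. 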
For more details, please read \"\n \"https://github.com/sphinx-doc/sphinx/issues/6803\")\n\n # status code for command-line application\n self.statuscode = 0\n\n # delayed import to speed up startup\n from sphinx.application import builtin_extensions\n from sphinx.config import CONFIG_FILENAME, Config, Tags\n\n # read config\n self.tags = Tags(tags)\n with warnings.catch_warnings():\n warnings.simplefilter(\n \"ignore\", (DeprecationWarning, PendingDeprecationWarning))\n if self.confdir is None:\n self.config = Config({}, confoverrides or {})\n else: # pragma: no cover\n try:\n self.config = Config.read(\n self.confdir, confoverrides or {}, self.tags)\n except AttributeError:\n try:\n self.config = Config( # pylint: disable=E1121\n confdir, confoverrides or {}, self.tags)\n except TypeError:\n try:\n self.config = Config(confdir, CONFIG_FILENAME, # pylint: disable=E1121\n confoverrides or {}, self.tags)\n except TypeError:\n # Sphinx==3.0.0\n self.config = Config({}, confoverrides or {})\n self.sphinx__display_version__ = __display_version__\n\n # pre-initialize the config values\n self.config.pre_init_values()\n\n # set up translation infrastructure\n self._init_i18n()\n\n # check the Sphinx version if requested\n if (self.config.needs_sphinx and self.config.needs_sphinx >\n __display_version__): # pragma: no cover\n from sphinx.locale import _\n from sphinx.application import VersionRequirementError\n raise VersionRequirementError(\n _('This project needs at least Sphinx v%s and therefore cannot '\n 'be built with this version.') % self.config.needs_sphinx)\n\n # set confdir to srcdir if -C given (!= no confdir); a few pieces\n # of code expect a confdir to be set\n if self.confdir is None:\n self.confdir = self.srcdir\n\n # load all built-in extension modules\n for extension in builtin_extensions:\n try:\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning)\n self.setup_extension(extension)\n except Exception as e: # pragma: no cover\n if 'sphinx.builders.applehelp' not in str(e): # pragma: no cover\n mes = \"Unable to run setup_extension '{0}'\\nWHOLE LIST\\n{1}\".format(\n extension, \"\\n\".join(builtin_extensions))\n raise ExtensionError(mes) from e\n\n # load all user-given extension modules\n for extension in self.config.extensions:\n self.setup_extension(extension)\n\n # /1 addition to the original code\n # additional extensions\n if new_extensions:\n for extension in new_extensions:\n if isinstance(extension, str):\n self.setup_extension(extension)\n else: # pragma: no cover\n # We assume it is a module.\n dirname = os.path.dirname(extension.__file__)\n sys.path.insert(0, dirname)\n self.setup_extension(extension.__name__)\n del sys.path[0]\n\n # add default HTML builders\n self.add_builder(MemoryHTMLBuilder)\n self.add_builder(MemoryRSTBuilder)\n self.add_builder(MemoryMDBuilder)\n self.add_builder(MemoryLatexBuilder)\n self.add_builder(MemoryDocTreeBuilder)\n\n if isinstance(buildername, tuple):\n if len(buildername) != 2:\n raise ValueError( # pragma: no cover\n \"The builder can be custom but it must be specified \"\n \"as a 2-tuple=(builder_name, builder_class).\")\n self.add_builder(buildername[1])\n buildername = buildername[0]\n\n # /1 end of addition\n\n # preload builder module (before init config values)\n self.preload_builder(buildername)
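\n\n # Editorial sketch (hypothetical, not from the original code): because\n # conf.py may behave as an extension, a minimal conf.py could expose\n # def setup(app):\n # app.add_config_value('my_value', '', 'env')\n # which the block below would then call with this application.\n\n # the config file itself can be an extension\n if self.config.setup:\n prefix = f\"while setting up extension {'conf.py'}:\"\n if prefixed_warnings is not None:\n with 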
prefixed_warnings(prefix):\n if callable(self.config.setup):\n self.config.setup(self)\n else: # pragma: no cover\n from sphinx.locale import _\n from sphinx.application import ConfigError\n raise ConfigError(\n _(\"'setup' as currently defined in conf.py isn't a Python callable. \"\n \"Please modify its definition to make it a callable function. This is \"\n \"needed for conf.py to behave as a Sphinx extension.\")\n )\n elif callable(self.config.setup):\n self.config.setup(self)\n\n # now that we know all config values, check the overrides:\n # every remaining key must be declared by some extension\n noallowed = []\n rem = []\n for k in confoverrides:\n if k in {'initial_header_level', 'doctitle_xform', 'input_encoding',\n 'outdir', 'warnings_log', 'extensions'}:\n continue\n if k == 'override_image_directive':\n self.config.images_config[\"override_image_directive\"] = True\n rem.append(k)\n continue\n if k not in self.config.values:\n noallowed.append(k)\n for k in rem:\n del confoverrides[k]\n if len(noallowed) > 0:\n raise ValueError( # pragma: no cover\n \"The following configuration values are not declared by any extension.\\n--???--\\n\"\n \"{0}\\n--DECLARED--\\n{1}\".format(\n \"\\n\".join(sorted(noallowed)),\n \"\\n\".join(sorted(self.config.values))))\n\n # now that we know all config values, collect them from conf.py\n self.config.init_values()\n self.events.emit('config-inited', self.config)\n\n # /2 addition to the original code\n # check extension versions if requested\n # self.config.needs_extensions = self.config.extensions\n if not hasattr(self.config, 'items'):\n\n def _citems():\n for k, v in self.config.values.items():\n yield k, v\n\n self.config.items = _citems\n\n # /2 end of addition\n\n # create the project\n self.project = Project(self.srcdir, self.config.source_suffix)\n # set up the build environment\n self._init_env(freshenv)\n assert self.env is not None\n # create the builder, initializes _MemoryBuilder\n self.builder = self.create_builder(buildername)\n # build environment post-initialisation, after creating the builder\n if hasattr(self, \"_post_init_env\"):\n self._post_init_env()\n # set up the builder\n self._init_builder()\n\n if not isinstance(self.env, _CustomBuildEnvironment):\n raise TypeError( # pragma: no cover\n f\"self.env is not _CustomBuildEnvironment: {type(self.env)!r} \"\n f\"buildername='{buildername}'\")\n\n # addition\n self._extended_init_()\n\n # verification\n self._check_init_()\n\n def _init_builder(self) -> None:\n if not hasattr(self.builder, \"env\") or self.builder.env is None:\n self.builder.set_environment(self.env)\n self.builder.init()\n self.events.emit('builder-inited')\n\n def _check_init_(self):\n pass\n\n def _init_env(self, freshenv):\n ENV_PICKLE_FILENAME = 'environment.pickle'\n filename = os.path.join(self.doctreedir, ENV_PICKLE_FILENAME)\n if freshenv or not os.path.exists(filename):\n self.env = _CustomBuildEnvironment(self)\n self._fresh_env_used = True\n self.env.setup(self)\n if (self.srcdir is not None and self.srcdir != \"IMPOSSIBLE:TOFIND\" and\n self.builder is not None):\n self.env.find_files(self.config, self.builder)\n return self.env\n\n if \"IMPOSSIBLE:TOFIND\" not in self.doctreedir: # pragma: no cover\n from sphinx.application import ENV_PICKLE_FILENAME\n filename = os.path.join(self.doctreedir, ENV_PICKLE_FILENAME)
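\n # A pickled environment from a previous run is reused when possible;\n # any failure while loading it falls back to a fresh environment below.\n try:\n self.info('loading pickled environment... 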
')\n with open(filename, 'rb') as f:\n self.env = pickle.load(f)\n self.env.setup(self)\n self.info('done')\n return self.env\n except Exception as err:\n self.info('failed: %r', err)\n return self._init_env(freshenv=True)\n\n if self.env is None: # pragma: no cover\n self.env = _CustomBuildEnvironment(self)\n if hasattr(self.env, 'setup'):\n self.env.setup(self)\n return self.env\n\n if not hasattr(self.env, 'project') or self.env.project is None:\n raise AttributeError( # pragma: no cover\n \"self.env.project is not initialized.\")\n\n def create_builder(self, name):\n \"\"\"\n Creates a builder, raises an exception if name is None.\n \"\"\"\n if name is None:\n raise ValueError( # pragma: no cover\n \"Builder name cannot be None\")\n try:\n return self.registry.create_builder(self, name, env=self.env)\n except TypeError:\n # old version of sphinx\n return self.registry.create_builder(self, name)\n\n def _extended_init_(self):\n \"\"\"\n Additional initialization steps.\n \"\"\"\n if not hasattr(self, \"domains\"):\n self.domains = {}\n if not hasattr(self, \"_events\"):\n self._events = {}\n\n # Otherwise, roles such as :issue: would be missing.\n setup_link_roles(self)\n\n def _lookup_doctree(self, doctree, node_type):\n for node in doctree.traverse(node_type):\n yield node\n\n def _add_missing_ids(self, doctree):\n for i, node in enumerate(self._lookup_doctree(doctree, None)):\n stype = str(type(node))\n if ('section' not in stype and 'title' not in stype and\n 'reference' not in stype):\n continue\n try:\n node['ids'][0]\n except IndexError:\n node['ids'] = ['missing%d' % i]\n except TypeError: # pragma: no cover\n pass\n\n def finalize(self, doctree, external_docnames=None):\n \"\"\"\n Finalizes the documentation after it was parsed.\n\n @param doctree doctree (or pub.document), available after publication\n @param external_docnames other docnames the doctree references\n \"\"\"\n imgs = list(self._lookup_doctree(doctree, nodes.image))\n for img in imgs:\n img['save_uri'] = img['uri']\n\n if not isinstance(self.env, _CustomBuildEnvironment):\n raise TypeError( # pragma: no cover\n f\"self.env is not _CustomBuildEnvironment: '{type(self.env)}'\")\n if not isinstance(self.builder.env, _CustomBuildEnvironment):\n raise TypeError( # pragma: no cover\n \"self.builder.env is not _CustomBuildEnvironment: '{0}'\".format(\n type(self.builder.env)))\n self.doctree_ = doctree\n self.builder.doctree_ = doctree\n self.env.doctree_[self.config.master_doc] = doctree\n self.env.all_docs = {self.config.master_doc: self.config.master_doc}\n\n if external_docnames:\n for doc in external_docnames:\n self.env.all_docs[doc] = doc\n\n # This step goes through many functions, including one\n # modifying paths in image nodes.\n # Look for node['candidates'] = candidates in Sphinx code.\n # If a path startswith('/'), it is removed.\n from sphinx.environment.collectors.asset import logger as logger_asset\n logger_asset.setLevel(40) # only errors\n self._add_missing_ids(doctree)\n self.events.emit('doctree-read', doctree)\n logger_asset.setLevel(30) # back to warnings\n\n for img in imgs:\n img['uri'] = img['save_uri']\n\n self.events.emit('doctree-resolved', doctree,\n self.config.master_doc)\n self.builder.write(None, None, 'all')\n\n def debug(self, message, *args, **kwargs):\n self._logger.debug(message, *args, **kwargs)\n\n def info(self, message, *args):\n self._logger.info(message, *args)\n\n def warning(self, message='', name=None, type=None, subtype=None):\n if \"is already registered\" not in message: # pragma: no 
cover\n self._logger.warning(\n \"[_CustomSphinx] %s -- %s\", message, name,\n type=type, subtype=subtype)\n\n def add_builder(self, builder, override=False):\n self._added_objects.append(('builder', builder))\n if builder.name not in self.registry.builders:\n self.debug('[_CustomSphinx] adding builder: %r', builder)\n self.registry.add_builder(builder, override=override)\n else:\n self.debug('[_CustomSphinx] already added builder: %r', builder)\n\n def setup_extension(self, extname):\n self._added_objects.append(('extension', extname))\n\n logger = getLogger('sphinx.application')\n disa = logger.logger.disabled\n logger.logger.disabled = True\n\n # delayed import to speed up startup\n try:\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning)\n self.registry.load_extension(self, extname)\n except Exception as e: # pragma: no cover\n raise ExtensionError(\n f\"Unable to setup extension '{extname}'\") from e\n finally:\n logger.logger.disabled = disa\n\n def add_directive(self, name, obj, content=None, arguments=None, # pylint: disable=W0221,W0237\n override=True, **options):\n self._added_objects.append(('directive', name))\n if name == 'plot' and obj.__name__ == 'PlotDirective':\n\n old_run = obj.run\n\n def run(self):\n \"\"\"Run the plot directive.\"\"\"\n logger = getLogger(\"MockSphinxApp\")\n logger.info('[MockSphinxApp] PlotDirective: %r', self.content)\n try:\n res = old_run(self)\n logger.info('[MockSphinxApp] PlotDirective ok')\n return res\n except OSError as e: # pragma: no cover\n logger = getLogger(\"MockSphinxApp\")\n logger.info('[MockSphinxApp] PlotDirective failed: %s', e)\n return []\n\n obj.run = run\n\n Sphinx.add_directive(self, name, obj, override=override, **options)\n\n def add_domain(self, domain, override=True):\n self._added_objects.append(('domain', domain))\n Sphinx.add_domain(self, domain, override=override)\n # For some reason, the directives are missing from the main catalog\n # in docutils.\n for k, v in domain.directives.items():\n self.add_directive(f\"{domain.name}:{k}\", v)\n if domain.name in ('py', 'std', 'rst'):\n # We add the directive without the domain name as a prefix.\n self.add_directive(k, v)\n for k, v in domain.roles.items():\n self.add_role(f\"{domain.name}:{k}\", v)\n if domain.name in ('py', 'std', 'rst'):\n # We add the role without the domain name as a prefix.\n self.add_role(k, v)\n\n def add_role(self, name, role, override=True):\n self._added_objects.append(('role', name))\n self.debug('[_CustomSphinx] adding role: %r', (name, role))\n roles.register_local_role(name, role)\n\n def add_generic_role(self, name, nodeclass, override=True):\n self._added_objects.append(('generic_role', name))\n self.debug(\"[_CustomSphinx] adding generic role: '%r'\",\n (name, nodeclass))\n role = roles.GenericRole(name, nodeclass)\n roles.register_local_role(name, role)\n\n def add_node(self, node, override=True, **kwds):\n self._added_objects.append(('node', node))\n self.debug('[_CustomSphinx] adding node: %r', (node, kwds))\n nodes._add_node_class_names([node.__name__])\n for key, val in kwds.items():\n try:\n visit, depart = val\n except ValueError: # pragma: no cover\n raise ExtensionError((\"Value for key '%r' must be a \"\n \"(visit, depart) function tuple\") % key)\n translator = self.registry.translators.get(key)\n translators = []\n if translator is not None:\n translators.append(translator)\n elif key == 'html':\n from sphinx.writers.html import HTMLTranslator\n translators.append(HTMLTranslator)
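\n # Register the visitors on the HTML5 translator as well when that\n # writer is available in the installed Sphinx.\n if 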
is_html5_writer_available():\n from sphinx.writers.html5 import HTML5Translator\n translators.append(HTML5Translator)\n elif key == 'latex':\n translators.append(_get_LaTeXTranslator())\n elif key == 'elatex':\n translators.append(EnhancedLaTeXBuilder)\n elif key == 'text':\n from sphinx.writers.text import TextTranslator\n translators.append(TextTranslator)\n elif key == 'man':\n from sphinx.writers.manpage import ManualPageTranslator\n translators.append(ManualPageTranslator)\n elif key == 'texinfo':\n from sphinx.writers.texinfo import TexinfoTranslator\n translators.append(TexinfoTranslator)\n\n for translator in translators:\n setattr(translator, 'visit_' + node.__name__, visit)\n if depart:\n setattr(translator, 'depart_' + node.__name__, depart)\n\n def add_event(self, name):\n self._added_objects.append(('event', name))\n Sphinx.add_event(self, name)\n\n def add_config_value(self, name, default, rebuild, types_=(), types=()): # pylint: disable=W0221,W0237\n types_ = types or types_\n self._added_objects.append(('config_value', name))\n Sphinx.add_config_value(self, name, default, rebuild, types_)\n\n def add_directive_to_domain(self, domain, name, obj, has_content=None, # pylint: disable=W0221,W0237\n argument_spec=None, override=False, **option_spec):\n self._added_objects.append(('directive_to_domain', domain, name))\n Sphinx.add_directive_to_domain(self, domain, name, obj,\n override=override, **option_spec)\n\n def add_role_to_domain(self, domain, name, role, override=False):\n self._added_objects.append(('roles_to_domain', domain, name))\n Sphinx.add_role_to_domain(self, domain, name, role, override=override)\n\n def add_transform(self, transform):\n self._added_objects.append(('transform', transform))\n Sphinx.add_transform(self, transform)\n\n def add_post_transform(self, transform):\n self._added_objects.append(('post_transform', transform))\n Sphinx.add_post_transform(self, transform)\n\n def add_js_file(self, filename, priority=500, **kwargs): # pylint: disable=W0221\n # loading_method=None: added in Sphinx 4.4\n self._added_objects.append(('js', filename))\n Sphinx.add_js_file(self, filename, priority=priority, **kwargs)\n\n def add_css_file(self, filename, priority=500, **kwargs):\n self._added_objects.append(('css', filename))\n Sphinx.add_css_file(self, filename, priority=priority, **kwargs)\n\n def add_latex_package(self, packagename, options=None, after_hyperref=False):\n self._added_objects.append(('latex', packagename))\n Sphinx.add_latex_package(\n self, packagename=packagename, options=options,\n after_hyperref=after_hyperref)\n\n def add_object_type(self, directivename, rolename, indextemplate='',\n parse_node=None, ref_nodeclass=None, objname='',\n doc_field_types=None, override=False):\n if doc_field_types is None:\n doc_field_types = []\n self._added_objects.append(('object', directivename, rolename))\n Sphinx.add_object_type(self, directivename, rolename, indextemplate=indextemplate,\n parse_node=parse_node, ref_nodeclass=ref_nodeclass,\n objname=objname, doc_field_types=doc_field_types,\n override=override)\n\n def add_env_collector(self, collector):\n \"\"\"\n See :epkg:`class Sphinx`.\n \"\"\"\n self.debug(\n '[_CustomSphinx] adding environment collector: %r', collector)\n coll = collector()\n coll.enable(self)\n self._added_collectors.append(coll)\n\n def disconnect_env_collector(self, clname, exc=True):\n \"\"\"\n Disables a collector given its class name.\n\n @param clname name of the collector class to disable\n @param exc raises an exception if not found\n @return found collector\n 
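A typical call (editorial example):\n app.disconnect_env_collector('IndexEntriesCollector')\n disables the standard index-entries collector, assuming Sphinx's usual\n class name for that collector.\n 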
\"\"\"\n found = None\n foundi = None\n for i, co in enumerate(self._added_collectors):\n if clname == co.__class__.__name__:\n found = co\n foundi = i\n break\n if found is not None and not exc:\n return None\n if found is None:\n raise ValueError( # pragma: no cover\n \"Unable to find a collector '{0}' in \\n{1}\".format(\n clname, \"\\n\".join(\n map(lambda x: x.__class__.__name__,\n self._added_collectors))))\n for v in found.listener_ids.values():\n self.disconnect(v)\n del self._added_collectors[foundi]\n return found\n", "repo_name": "sdpython/pyquickhelper", "sub_path": "src/pyquickhelper/helpgen/sphinxm_convert_doc_sphinx_helper.py", "file_name": "sphinxm_convert_doc_sphinx_helper.py", "file_ext": "py", "file_size_in_byte": 61727, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 20, "dataset": "github-code", "pt": "46", "api": [{"api_name": "sphinx.writers.latex.LaTeXTranslator", "line_number": 44, "usage_type": "name"}, {"api_name": "sphinx.util.docutils.is_html5_writer_available", "line_number": 53, "usage_type": "call"}, {"api_name": "docutils.languages.en.labels", "line_number": 68, "usage_type": "attribute"}, {"api_name": "docutils.languages.en", "line_number": 68, "usage_type": "name"}, {"api_name": "sphinx.writers.html.HTMLTranslator", "line_number": 88, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_rst_builder.RstTranslator", "line_number": 94, "usage_type": "name"}, {"api_name": "docutils.nodes.SkipNode", "line_number": 152, "usage_type": "attribute"}, {"api_name": "docutils.nodes", "line_number": 152, "usage_type": "name"}, {"api_name": "sphinx.writers.html.HTMLTranslator", "line_number": 176, "usage_type": "name"}, {"api_name": "sphinx.writers.html.HTMLTranslator.__init__", "line_number": 182, "usage_type": "call"}, {"api_name": "sphinx.writers.html.HTMLTranslator", "line_number": 182, "usage_type": "name"}, {"api_name": "sphinx.writers.html.HTMLTranslator", "line_number": 189, "usage_type": "name"}, {"api_name": "sphinx.writers.html.HTMLTranslator.visit_field", "line_number": 195, "usage_type": "call"}, {"api_name": "sphinx.writers.html.HTMLTranslator", "line_number": 195, "usage_type": "name"}, {"api_name": "docutils.nodes.SkipNode", "line_number": 199, "usage_type": "attribute"}, {"api_name": "docutils.nodes", "line_number": 199, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_rst_builder.RstTranslator", "line_number": 206, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_rst_builder.RstTranslator.__init__", "line_number": 215, "usage_type": "call"}, {"api_name": "sphinxext.sphinx_rst_builder.RstTranslator", "line_number": 215, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_rst_builder.RstTranslator", "line_number": 220, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_md_builder.MdTranslator", "line_number": 223, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_md_builder.MdTranslator.__init__", "line_number": 232, "usage_type": "call"}, {"api_name": "sphinxext.sphinx_md_builder.MdTranslator", "line_number": 232, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_md_builder.MdTranslator", "line_number": 237, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_doctree_builder.DocTreeTranslator", "line_number": 240, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_doctree_builder.DocTreeTranslator.__init__", "line_number": 249, "usage_type": "call"}, {"api_name": "sphinxext.sphinx_doctree_builder.DocTreeTranslator", "line_number": 249, "usage_type": "name"}, {"api_name": 
"sphinxext.sphinx_doctree_builder.DocTreeTranslator", "line_number": 250, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_latex_builder.EnhancedLaTeXTranslator", "line_number": 253, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_latex_builder.EnhancedLaTeXTranslator.__init__", "line_number": 267, "usage_type": "call"}, {"api_name": "sphinxext.sphinx_latex_builder.EnhancedLaTeXTranslator", "line_number": 267, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_latex_builder.EnhancedLaTeXTranslator", "line_number": 273, "usage_type": "name"}, {"api_name": "sphinx.writers.html.HTMLWriter", "line_number": 332, "usage_type": "name"}, {"api_name": "sphinx.writers.html.HTMLWriter", "line_number": 350, "usage_type": "argument"}, {"api_name": "sphinxext.sphinx_rst_builder.RstWriter", "line_number": 366, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_rst_builder.RstWriter", "line_number": 378, "usage_type": "argument"}, {"api_name": "sphinxext.sphinx_md_builder.MdWriter", "line_number": 386, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_md_builder.MdWriter", "line_number": 398, "usage_type": "argument"}, {"api_name": "sphinxext.sphinx_doctree_builder.DocTreeWriter", "line_number": 406, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_doctree_builder.DocTreeWriter", "line_number": 418, "usage_type": "argument"}, {"api_name": "sphinxext.sphinx_latex_builder.EnhancedLaTeXWriter", "line_number": 426, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_latex_builder.EnhancedLaTeXWriter", "line_number": 438, "usage_type": "argument"}, {"api_name": "sphinx.application.util", "line_number": 481, "usage_type": "attribute"}, {"api_name": "sphinx.application", "line_number": 481, "usage_type": "name"}, {"api_name": "conf_path_tools.custom_ensuredir", "line_number": 481, "usage_type": "name"}, {"api_name": "sphinx.application.builders", "line_number": 482, "usage_type": "attribute"}, {"api_name": "sphinx.application", "line_number": 482, "usage_type": "name"}, {"api_name": "conf_path_tools.custom_ensuredir", "line_number": 482, "usage_type": "name"}, {"api_name": "sphinx.util.logging.pending_warnings", "line_number": 519, "usage_type": "call"}, {"api_name": "sphinx.util.status_iterator", "line_number": 520, "usage_type": "call"}, {"api_name": "sphinx.util.nodes.inline_all_toctrees", "line_number": 545, "usage_type": "call"}, {"api_name": "sphinx.util.console.darkgreen", "line_number": 546, "usage_type": "name"}, {"api_name": "docutils.nodes.reference", "line_number": 557, "usage_type": "attribute"}, {"api_name": "docutils.nodes", "line_number": 557, "usage_type": "name"}, {"api_name": "sphinx.util.osutil.relative_uri", "line_number": 614, "usage_type": "call"}, {"api_name": "html.escape", "line_number": 625, "usage_type": "call"}, {"api_name": "sphinx.util.logging.getLogger", "line_number": 654, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 665, "usage_type": "call"}, {"api_name": "_single_file_html_builder.CustomSingleFileHTMLBuilder", "line_number": 669, "usage_type": "name"}, {"api_name": "{'sphinx.util.osutil': 'sphinx.util.osutil', 'custom_ensuredir': 'conf_path_tools.custom_ensuredir', 'pending_warnings': 'sphinx.util.logging.pending_warnings', 'status_iterator': 'sphinx.util.status_iterator', 'inline_all_toctrees': 'sphinx.util.nodes.inline_all_toctrees', 'darkgreen': 'sphinx.util.console.darkgreen', 'relative_uri': 'sphinx.util.osutil.relative_uri'}._init", "line_number": 694, "usage_type": "call"}, {"api_name": 
"_single_file_html_builder.CustomSingleFileHTMLBuilder", "line_number": 694, "usage_type": "argument"}, {"api_name": "sphinxext.sphinx_rst_builder.RstBuilder", "line_number": 697, "usage_type": "name"}, {"api_name": "{'sphinx.util.osutil': 'sphinx.util.osutil', 'custom_ensuredir': 'conf_path_tools.custom_ensuredir', 'pending_warnings': 'sphinx.util.logging.pending_warnings', 'status_iterator': 'sphinx.util.status_iterator', 'inline_all_toctrees': 'sphinx.util.nodes.inline_all_toctrees', 'darkgreen': 'sphinx.util.console.darkgreen', 'relative_uri': 'sphinx.util.osutil.relative_uri'}._init", "line_number": 726, "usage_type": "call"}, {"api_name": "sphinxext.sphinx_rst_builder.RstBuilder", "line_number": 726, "usage_type": "argument"}, {"api_name": "io.StringIO", "line_number": 739, "usage_type": "call"}, {"api_name": "sphinxext.sphinx_md_builder.MdBuilder", "line_number": 743, "usage_type": "name"}, {"api_name": "{'sphinx.util.osutil': 'sphinx.util.osutil', 'custom_ensuredir': 'conf_path_tools.custom_ensuredir', 'pending_warnings': 'sphinx.util.logging.pending_warnings', 'status_iterator': 'sphinx.util.status_iterator', 'inline_all_toctrees': 'sphinx.util.nodes.inline_all_toctrees', 'darkgreen': 'sphinx.util.console.darkgreen', 'relative_uri': 'sphinx.util.osutil.relative_uri'}._init", "line_number": 768, "usage_type": "call"}, {"api_name": "sphinxext.sphinx_md_builder.MdBuilder", "line_number": 768, "usage_type": "argument"}, {"api_name": "io.StringIO", "line_number": 781, "usage_type": "call"}, {"api_name": "sphinxext.sphinx_doctree_builder.DocTreeBuilder", "line_number": 785, "usage_type": "name"}, {"api_name": "{'sphinx.util.osutil': 'sphinx.util.osutil', 'custom_ensuredir': 'conf_path_tools.custom_ensuredir', 'pending_warnings': 'sphinx.util.logging.pending_warnings', 'status_iterator': 'sphinx.util.status_iterator', 'inline_all_toctrees': 'sphinx.util.nodes.inline_all_toctrees', 'darkgreen': 'sphinx.util.console.darkgreen', 'relative_uri': 'sphinx.util.osutil.relative_uri'}._init", "line_number": 809, "usage_type": "call"}, {"api_name": "sphinxext.sphinx_doctree_builder.DocTreeBuilder", "line_number": 809, "usage_type": "argument"}, {"api_name": "io.StringIO", "line_number": 822, "usage_type": "call"}, {"api_name": "sphinxext.sphinx_latex_builder.EnhancedLaTeXBuilder", "line_number": 826, "usage_type": "name"}, {"api_name": "{'sphinx.util.osutil': 'sphinx.util.osutil', 'custom_ensuredir': 'conf_path_tools.custom_ensuredir', 'pending_warnings': 'sphinx.util.logging.pending_warnings', 'status_iterator': 'sphinx.util.status_iterator', 'inline_all_toctrees': 'sphinx.util.nodes.inline_all_toctrees', 'darkgreen': 'sphinx.util.console.darkgreen', 'relative_uri': 'sphinx.util.osutil.relative_uri'}._init", "line_number": 851, "usage_type": "call"}, {"api_name": "sphinxext.sphinx_latex_builder.EnhancedLaTeXBuilder", "line_number": 851, "usage_type": "argument"}, {"api_name": "sphinx.highlighting.PygmentsBridge", "line_number": 855, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 862, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 865, "usage_type": "name"}, {"api_name": "io.StringIO.write", "line_number": 868, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 868, "usage_type": "name"}, {"api_name": "io.StringIO.write", "line_number": 871, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 871, "usage_type": "name"}, {"api_name": "{'PygmentsBridge': 'sphinx.highlighting.PygmentsBridge'}.EnhancedStringIO", "line_number": 877, 
"usage_type": "call"}, {"api_name": "sphinx.environment.BuildEnvironment", "line_number": 882, "usage_type": "name"}, {"api_name": "sphinx.environment.BuildEnvironment.__init__", "line_number": 891, "usage_type": "call"}, {"api_name": "sphinx.environment.BuildEnvironment", "line_number": 891, "usage_type": "name"}, {"api_name": "docutils.utils.Reporter", "line_number": 900, "usage_type": "call"}, {"api_name": "sphinx.util.docutils.WarningStream", "line_number": 901, "usage_type": "call"}, {"api_name": "sphinx.transforms.SphinxTransformer", "line_number": 925, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 932, "usage_type": "name"}, {"api_name": "sphinx.util.logging.getLogger", "line_number": 994, "usage_type": "call"}, {"api_name": "sphinx.util.build_phase.BuildPhase.INITIALIZATION", "line_number": 1006, "usage_type": "attribute"}, {"api_name": "sphinx.util.build_phase.BuildPhase", "line_number": 1006, "usage_type": "name"}, {"api_name": "sphinx.registry.SphinxComponentRegistry", "line_number": 1012, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 1022, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1022, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 1023, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1023, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 1025, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1025, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 1026, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1026, "usage_type": "attribute"}, {"api_name": "sphinx.errors.ApplicationError", "line_number": 1030, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 1034, "usage_type": "call"}, {"api_name": "sphinx.events.EventManager", "line_number": 1042, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 1046, "usage_type": "call"}, {"api_name": "sphinx.__display_version__", "line_number": 1050, "usage_type": "name"}, {"api_name": "sys.version_info", "line_number": 1053, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 1053, "usage_type": "call"}, {"api_name": "sphinx.config.Tags", "line_number": 1067, "usage_type": "call"}, {"api_name": "warnings.catch_warnings", "line_number": 1068, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 1069, "usage_type": "call"}, {"api_name": "sphinx.config.Config", "line_number": 1072, "usage_type": "call"}, {"api_name": "sphinx.config.Config.read", "line_number": 1075, "usage_type": "call"}, {"api_name": "sphinx.config.Config", "line_number": 1075, "usage_type": "name"}, {"api_name": "sphinx.config.Config", "line_number": 1079, "usage_type": "call"}, {"api_name": "sphinx.config.Config", "line_number": 1083, "usage_type": "call"}, {"api_name": "sphinx.config.CONFIG_FILENAME", "line_number": 1083, "usage_type": "name"}, {"api_name": "sphinx.config.Config", "line_number": 1087, "usage_type": "call"}, {"api_name": "sphinx.__display_version__", "line_number": 1088, "usage_type": "name"}, {"api_name": "sphinx.__display_version__", "line_number": 1098, "usage_type": "name"}, {"api_name": "sphinx.application.VersionRequirementError", "line_number": 1101, "usage_type": "call"}, {"api_name": "sphinx.locale._", "line_number": 1102, "usage_type": "call"}, {"api_name": "sphinx.application.builtin_extensions", "line_number": 1111, "usage_type": "name"}, {"api_name": 
"warnings.catch_warnings", "line_number": 1113, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 1114, "usage_type": "call"}, {"api_name": "sphinx.application.builtin_extensions", "line_number": 1120, "usage_type": "name"}, {"api_name": "sphinx.errors.ExtensionError", "line_number": 1121, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 1135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1135, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 1136, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 1136, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 1138, "usage_type": "attribute"}, {"api_name": "sphinx.util.logging.prefixed_warnings", "line_number": 1163, "usage_type": "name"}, {"api_name": "sphinx.util.logging.prefixed_warnings", "line_number": 1164, "usage_type": "call"}, {"api_name": "sphinx.application.ConfigError", "line_number": 1170, "usage_type": "call"}, {"api_name": "sphinx.locale._", "line_number": 1171, "usage_type": "call"}, {"api_name": "sphinx.project.Project", "line_number": 1218, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1252, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1252, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 1253, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1253, "usage_type": "attribute"}, {"api_name": "{'WarningStream': 'sphinx.util.docutils.WarningStream'}", "line_number": 1254, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1264, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1264, "usage_type": "attribute"}, {"api_name": "sphinx.application.ENV_PICKLE_FILENAME", "line_number": 1264, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 1268, "usage_type": "call"}, {"api_name": "{'WarningStream': 'sphinx.util.docutils.WarningStream'}", "line_number": 1277, "usage_type": "call"}, {"api_name": "sphinx.ext.extlinks.setup_link_roles", "line_number": 1309, "usage_type": "call"}, {"api_name": "docutils.nodes.image", "line_number": 1335, "usage_type": "attribute"}, {"api_name": "docutils.nodes", "line_number": 1335, "usage_type": "name"}, {"api_name": "sphinx.environment.collectors.asset.logger.setLevel", "line_number": 1360, "usage_type": "call"}, {"api_name": "sphinx.environment.collectors.asset.logger", "line_number": 1360, "usage_type": "name"}, {"api_name": "sphinx.environment.collectors.asset.logger.setLevel", "line_number": 1363, "usage_type": "call"}, {"api_name": "sphinx.environment.collectors.asset.logger", "line_number": 1363, "usage_type": "name"}, {"api_name": "sphinx.util.logging.getLogger", "line_number": 1395, "usage_type": "call"}, {"api_name": "warnings.catch_warnings", "line_number": 1401, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 1402, "usage_type": "call"}, {"api_name": "sphinx.errors.ExtensionError", "line_number": 1406, "usage_type": "call"}, {"api_name": "sphinx.util.logging.getLogger", "line_number": 1420, "usage_type": "call"}, {"api_name": "sphinx.util.logging.getLogger", "line_number": 1427, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx.add_directive", "line_number": 1433, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 1433, "usage_type": "name"}, {"api_name": "sphinx.application.Sphinx.add_domain", "line_number": 1437, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", 
"line_number": 1437, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.roles.register_local_role", "line_number": 1454, "usage_type": "call"}, {"api_name": "docutils.parsers.rst.roles", "line_number": 1454, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.roles.GenericRole", "line_number": 1460, "usage_type": "call"}, {"api_name": "docutils.parsers.rst.roles", "line_number": 1460, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.roles.register_local_role", "line_number": 1461, "usage_type": "call"}, {"api_name": "docutils.parsers.rst.roles", "line_number": 1461, "usage_type": "name"}, {"api_name": "docutils.nodes._add_node_class_names", "line_number": 1466, "usage_type": "call"}, {"api_name": "docutils.nodes", "line_number": 1466, "usage_type": "name"}, {"api_name": "sphinx.errors.ExtensionError", "line_number": 1471, "usage_type": "call"}, {"api_name": "sphinx.writers.html.HTMLTranslator", "line_number": 1479, "usage_type": "argument"}, {"api_name": "sphinx.util.docutils.is_html5_writer_available", "line_number": 1480, "usage_type": "call"}, {"api_name": "sphinx.writers.html5.HTML5Translator", "line_number": 1482, "usage_type": "name"}, {"api_name": "sphinxext.sphinx_latex_builder.EnhancedLaTeXBuilder", "line_number": 1486, "usage_type": "argument"}, {"api_name": "sphinx.writers.text.TextTranslator", "line_number": 1489, "usage_type": "name"}, {"api_name": "sphinx.writers.manpage.ManualPageTranslator", "line_number": 1492, "usage_type": "name"}, {"api_name": "sphinx.writers.texinfo.TexinfoTranslator", "line_number": 1495, "usage_type": "name"}, {"api_name": "sphinx.application.Sphinx.add_event", "line_number": 1504, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 1504, "usage_type": "name"}, {"api_name": "sphinx.application.Sphinx.add_config_value", "line_number": 1509, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 1509, "usage_type": "name"}, {"api_name": "sphinx.application.Sphinx.add_directive_to_domain", "line_number": 1514, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 1514, "usage_type": "name"}, {"api_name": "sphinx.application.Sphinx.add_role_to_domain", "line_number": 1519, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 1519, "usage_type": "name"}, {"api_name": "sphinx.application.Sphinx.add_transform", "line_number": 1523, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 1523, "usage_type": "name"}, {"api_name": "sphinx.application.Sphinx.add_post_transform", "line_number": 1527, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 1527, "usage_type": "name"}, {"api_name": "sphinx.application.Sphinx.add_js_file", "line_number": 1532, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 1532, "usage_type": "name"}, {"api_name": "sphinx.application.Sphinx.add_css_file", "line_number": 1536, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 1536, "usage_type": "name"}, {"api_name": "sphinx.application.Sphinx.add_latex_package", "line_number": 1540, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 1540, "usage_type": "name"}, {"api_name": "sphinx.application.Sphinx.add_object_type", "line_number": 1550, "usage_type": "call"}, {"api_name": "sphinx.application.Sphinx", "line_number": 1550, "usage_type": "name"}]} {"seq_id": "12783295224", "text": "from pyspark import 
SparkContext, SparkConf\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql import SparkSession\nfrom datetime import datetime, date, timedelta\n\nfrom imports.plotter import Plotter\n\nimport time\nimport sys\n\ndef main(argv):\n plotter = Plotter()\n\n y = []\n x = []\n\n uri_conf = 'mongodb://localhost:27017/meteo_station.outside'\n spark = (SparkSession\n .builder\n .appName(\"Meteo_Station\")\n .config(\"spark.mongodb.input.uri\", uri_conf)\n .config(\"spark.executor.memory\", \"4g\")\n .config(\"spark.executor.number\", \"4\")\n .config(\"spark.executor.cores\", \"4\")\n .config(\"spark.network.timeout\", \"800s\")\n .config('spark.yarn.executor.memoryOverhead', \"4096m\")\n # the value below is an assumption; the original call had no value and\n # \"4096m\" simply mirrors the yarn executor overhead above\n .config(\"spark.driver.memoryOverhead\", \"4096m\")\n .getOrCreate())\n\n df = spark.read.format(\"com.mongodb.spark.sql.DefaultSource\").load()\n df.printSchema()\n df.registerTempTable('temp')\n pressure = spark.sql('SELECT * FROM temp WHERE sensor = \\'BMP Pressure \\' ')\n coll = pressure.collect()\n\n for c in coll:\n y.append(float(c['data']))\n\n xdate = c['date']\n xtime = c['time']\n dtime = xdate + '_' + xtime\n\n try:\n xdtime = datetime.strptime(dtime, '%Y_%m_%d_%H:%M:%S')\n except ValueError:\n xdtime = datetime.strptime(dtime, '%Y_%m_%d_%H:%M:%S.%f')\n\n x.append(xdtime)\n\n print('-->Data count\t: ' + str(len(x)))\n\n plotter.show_plotter(x, y, plotLabel='Pressure (BMP180)', yLabel='mm Hg')\n\nif __name__ == '__main__':\n main(sys.argv[1:])", "repo_name": "DucklingDark/Meteo_Station", "sub_path": "python/mongo_spark.py", "file_name": "mongo_spark.py", "file_ext": "py", "file_size_in_byte": 1669, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "imports.plotter.Plotter", "line_number": 12, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 18, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 55, "usage_type": "attribute"}]} {"seq_id": "43149787832", "text": "import json\nimport urllib.request as urlr\n\nfrom geopandas.geodataframe import GeoDataFrame\nfrom src.helpers.database import Database\nimport pandas as pd\nimport geopandas as gpd\nfrom src.helpers.environment_variables import DATABASE_URL, SUPERUSER_DATABASE_URL\n\ndef download_epa_sld_data_as_geojson(filepath: str):\n """\n - This function downloads a geojson from the EPA's Smart Location Database\n - It uses a loop to iterate over all features in the table, which gets around\n the issue of the EPA rest service not allowing all of the features to be queried at once\n """\n\n # set variables for map service\n # map service url\n mapservice = (\n r"https://geodata.epa.gov/arcgis/rest/services/OA/SmartLocationDatabase/MapServer/15/"\n )
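\n # The where clause below keeps rows whose GEOID10 starts with 34005, 34007,\n # 34015 or 34021 -- by all appearances the FIPS codes of the four New Jersey\n # counties covered by DVRPC.\n # map service query\n query = 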
r'query?where=GEOID10+like+%2734005%25%27+or+GEOID10+like+%2734007%25%27+or+GEOID10+like+%2734015%25%27+or+GEOID10+like+%2734021%25%27'\n\n # geojson structure\n geojson = json.loads('{"type": "FeatureCollection"}')\n geojson["features"] = []\n\n # find total records allowed to be queried from the mapservice\n maxurl = mapservice + r"?f=json"\n response = urlr.urlopen(maxurl)\n maxjson = json.load(response)\n maxcount = int(maxjson["maxRecordCount"])\n\n # query all objectids, count total objectids\n oidurl = mapservice + query + r"&returnIdsOnly=true&f=json"\n print(oidurl)\n\n response = urlr.urlopen(oidurl)\n print(response)\n oidjson = json.load(response)\n print(oidjson)\n idfield = oidjson["objectIdFieldName"]\n print(idfield)\n idlist = oidjson["objectIds"]\n idlist.sort()\n numrec = len(idlist)\n\n # iterate through map service objectids by max record count\n for i in range(0, numrec, maxcount):\n torec = i + (maxcount - 1)\n if torec >= numrec:\n torec = numrec - 1\n fromid = idlist[i]\n toid = idlist[torec]\n where = r"{}+%3E%3D+{}+and+{}+%3C%3D+{}".format(idfield, fromid, idfield, toid)\n # possible to adjust api parameters here to request only specified fields (outfields) or even change the projection (outsr)\n urlstring = (\n mapservice\n + "query?where={}&outfields=*&outsr=4326&returnGeometry=true&f=geojson".format(where)\n )\n response = urlr.urlopen(urlstring)\n testjson = json.load(response)\n for feature in testjson["features"]:\n # append each feature to geojson dict\n geojson["features"].append(feature)\n\n # write final geojson to the location given by the caller\n file_path = filepath\n\n with open(file_path, "w") as outfile:\n json.dump(geojson, outfile)\n\n return file_path\n\nif __name__ == "__main__":\n db = Database()\n\n print("-" * 80)\n print("Importing data from SLD/Map Service")\n print("-" * 80)\n\n filepath = download_epa_sld_data_as_geojson("./SLD.geojson")\n gdf = gpd.read_file(filepath)\n explodedgdf = gdf.explode()\n db.import_geodataframe(explodedgdf, new_tablename=\"sld\", schema=\"extract\")\n\n\n", "repo_name": "dvrpc/DVRPC-VisionEval-Inputs-Preparation", "sub_path": "src/step_01_extract/mapservice_to_geojson.py", "file_name": "mapservice_to_geojson.py", "file_ext": "py", "file_size_in_byte": 3124, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "41", "api": [{"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 33, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 33, "usage_type": "name"}, {"api_name": "json.load", "line_number": 34, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 41, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 41, "usage_type": "name"}, {"api_name": "json.load", "line_number": 43, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 64, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 64, "usage_type": "name"}, {"api_name": "json.load", "line_number": 65, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 74, "usage_type": "call"}, {"api_name": "src.helpers.database.Database", "line_number": 79, "usage_type": "call"}, {"api_name": "geopandas.read_file", "line_number": 87, "usage_type": "call"}]} {"seq_id": "13924678442", "text": "import game\nimport pygame\nfrom kuposztok.Info.InfoActors import *\nimport 
kuposztok\n\nclass InfoStage(game.scene2d.MyStage):\n\n def soundvaltread(self):\n with open('../kuposztok/Save/options.txt', 'r') as beskinfile1:\n self.soundvaltbe = int(beskinfile1.readline())\n self.musica = int(beskinfile1.readline())\n self.allstagebe = int(beskinfile1.readline())\n beskinfile1.close()\n\n def __init__(self):\n super().__init__()\n self.height = pygame.display.get_surface().get_height()\n self.width = pygame.display.get_surface().get_width()\n self.soundvaltread()\n self.musicaselect = self.musica\n self.allstageben = self.allstagebe\n self.bg = BgActor()\n self.add_actor(self.bg)\n self.soundvalt = self.soundvaltbe\n pygame.mixer.init()\n self.allstage = False\n if self.allstageben == 0 or self.allstageben == 1:\n self.allstage = True\n if self.allstageben == 2:\n self.allstage = False\n if self.allstage == False:\n pygame.mixer.music.load(\"../kuposztok/music/infomusica.wav\")\n else:\n if self.musicaselect == 0 or self.musicaselect == 1:\n pygame.mixer.music.load(\"../kuposztok/music/gamemusica1.wav\")\n if self.musicaselect == 2:\n pygame.mixer.music.load(\"../kuposztok/music/gamemusica2.wav\")\n if self.musicaselect == 3:\n pygame.mixer.music.load(\"../kuposztok/music/gamemusica3.wav\")\n if self.musicaselect == 4:\n pygame.mixer.music.load(\"../kuposztok/music/gamemusica4.wav\")\n if self.musicaselect == 5:\n pygame.mixer.music.load(\"../kuposztok/music/gamemusica5.wav\")\n pygame.mixer.music.play(-1)\n if self.soundvalt == 0 or self.soundvalt == 1:\n pygame.mixer.music.set_volume(0.5)\n if self.soundvalt == 2:\n pygame.mixer.music.set_volume(0.20)\n if self.soundvalt == 3:\n pygame.mixer.music.set_volume(0.07)\n if self.soundvalt == 4:\n pygame.mixer.music.stop()\n self.text1 = game.scene2d.MyLabel(\"A játék lényege, hogy a karakterünkel minél több\")\n self.text1.set_color(0, 0, 0)\n self.text1.x = 100\n self.text1.y = 0 + self.text1.get_height()\n self.add_actor(self.text1)\n self.text2 = game.scene2d.MyLabel(\"pontot érjünk el úgy, hogy a fákat kerülgetjük.\")\n self.text2.set_color(0, 0, 0)\n self.text2.x = 100\n self.text2.y = 0 + self.text2.get_height() * 2\n self.add_actor(self.text2)\n self.text3 = game.scene2d.MyLabel(\"A játék ezek mellet tartalmaz még egyéb tárgyakat,\")\n self.text3.set_color(0, 0, 0)\n self.text3.x = 100\n self.text3.y = 0 + self.text3.get_height() * 3\n self.add_actor(self.text3)\n self.text31 = game.scene2d.MyLabel(\"mint például az energiaital, vagy a trap:\")\n self.text31.set_color(0, 0, 0)\n self.text31.x = 100\n self.text31.y = 0 + self.text31.get_height() * 4\n self.add_actor(self.text31)\n self.energy = Energy()\n self.energy.x = self.text3.get_x()\n self.energy.y = 0 + self.text3.get_height() * 5\n self.energy.width = 150\n self.energy.height = 150\n self.add_actor(self.energy)\n self.trap = Trap()\n self.trap.x = self.text3.get_x() + self.text3.get_x() * 3\n self.trap.y = 0 + self.text3.get_height() * 5\n self.add_actor(self.trap)\n self.text4 = game.scene2d.MyLabel(\"A játékost a billentyűzeten lévő 'w', 'a', 's', 'd' gombokkal\")\n self.text4.set_color(0, 0, 0)\n self.text4.x = 100\n self.text4.y = self.height / 2 + self.text4.get_height()\n self.add_actor(self.text4)\n self.text5 = game.scene2d.MyLabel(\"tudjuk irányítani. 
Multiplayer módban az egyik karaktert\")\n self.text5.set_color(0, 0, 0)\n self.text5.x = 100\n self.text5.y = self.height / 2 + self.text5.get_height() * 2\n self.add_actor(self.text5)\n self.text6 = game.scene2d.MyLabel(\"szintén ezekkel a gombokkal, míg a másikat pedig a \")\n self.text6.set_color(0, 0, 0)\n self.text6.x = 100\n self.text6.y = self.height / 2 + self.text5.get_height() * 3\n self.add_actor(self.text6)\n self.text7 = game.scene2d.MyLabel(\"billyentyűzeten lévő nyilakkal. Visszalépés \")\n self.text7.set_color(0, 0, 0)\n self.text7.x = 100\n self.text7.y = self.height / 2 + self.text7.get_height() * 4\n self.add_actor(self.text7)\n self.text71 = game.scene2d.MyLabel(\"az 'ESC' gombbal történik, valamint ez a gomb a menüben\")\n self.text71.set_color(0, 0, 0)\n self.text71.x = 100\n self.text71.y = self.height / 2 + self.text71.get_height() * 5\n self.add_actor(self.text71)\n self.text72 = game.scene2d.MyLabel(\"a kilépést szolgálja.\")\n self.text72.set_color(0, 0, 0)\n self.text72.x = 100\n self.text72.y = self.height / 2 + self.text72.get_height() * 6\n self.add_actor(self.text72)\n self.back = Back()\n self.add_actor(self.back)\n self.back.x = self.width - self.back.get_width()\n self.back.y = self.height - self.back.get_height()\n\n self.set_on_key_down_listener(self.katt)\n self.back.set_on_mouse_down_listener(self.Back)\n\n def katt(self, sender, event):\n if event.key == pygame.K_ESCAPE:\n self.screen.game.set_screen(kuposztok.Menu.MenuScreen.MenuScreen())\n\n def Back(self, sender, event):\n if event.button == 1:\n self.screen.game.set_screen(kuposztok.Menu.MenuScreen.MenuScreen())\n", "repo_name": "csany2020c/MyGame", "sub_path": "kuposztok/Info/InfoStage.py", "file_name": "InfoStage.py", "file_ext": "py", "file_size_in_byte": 5757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "game.scene2d", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.display.get_surface", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.display.get_surface", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.mixer.init", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 44, "usage_type": 
"attribute"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.stop", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 52, "usage_type": "attribute"}, {"api_name": "game.scene2d.MyLabel", "line_number": 53, "usage_type": "call"}, {"api_name": "game.scene2d", "line_number": 53, "usage_type": "attribute"}, {"api_name": "game.scene2d.MyLabel", "line_number": 58, "usage_type": "call"}, {"api_name": "game.scene2d", "line_number": 58, "usage_type": "attribute"}, {"api_name": "game.scene2d.MyLabel", "line_number": 63, "usage_type": "call"}, {"api_name": "game.scene2d", "line_number": 63, "usage_type": "attribute"}, {"api_name": "game.scene2d.MyLabel", "line_number": 68, "usage_type": "call"}, {"api_name": "game.scene2d", "line_number": 68, "usage_type": "attribute"}, {"api_name": "game.scene2d.MyLabel", "line_number": 83, "usage_type": "call"}, {"api_name": "game.scene2d", "line_number": 83, "usage_type": "attribute"}, {"api_name": "game.scene2d.MyLabel", "line_number": 88, "usage_type": "call"}, {"api_name": "game.scene2d", "line_number": 88, "usage_type": "attribute"}, {"api_name": "game.scene2d.MyLabel", "line_number": 93, "usage_type": "call"}, {"api_name": "game.scene2d", "line_number": 93, "usage_type": "attribute"}, {"api_name": "game.scene2d.MyLabel", "line_number": 98, "usage_type": "call"}, {"api_name": "game.scene2d", "line_number": 98, "usage_type": "attribute"}, {"api_name": "game.scene2d.MyLabel", "line_number": 103, "usage_type": "call"}, {"api_name": "game.scene2d", "line_number": 103, "usage_type": "attribute"}, {"api_name": "game.scene2d.MyLabel", "line_number": 108, "usage_type": "call"}, {"api_name": "game.scene2d", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 122, "usage_type": "attribute"}, {"api_name": "kuposztok.Menu.MenuScreen.MenuScreen", "line_number": 123, "usage_type": "call"}, {"api_name": "kuposztok.Menu", "line_number": 123, "usage_type": "attribute"}, {"api_name": "kuposztok.Menu.MenuScreen.MenuScreen", "line_number": 127, "usage_type": "call"}, {"api_name": "kuposztok.Menu", "line_number": 127, "usage_type": "attribute"}]} {"seq_id": "5869381803", "text": "from django.urls import path\nfrom django.views.generic import TemplateView\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('register/', views.register, name='register'),\n path('requirements/', views.requirements, name='requirements'),\n path('types/', views.types, name='types'),\n path('tips/', views.tips, name='tips'),\n path('login/', views.log, name='login'),\n path('donor_register/', views.donor_register.as_view(), name='donor_register'),\n path('center_register/', views.center_register.as_view(), name='center_register'),\n path('quiz/', views.quiz, name='quiz'),\n path('edit_profile/', views.edit_profile, name='edit_profile'),\n path('profiles/', views.profiles, name='profiles'),\n path('search/', views.search_results, name='search_results'),\n path('create_event/', views.create_event.as_view(), name='create_event'),\n path('appointment/', views.create_appointment.as_view(), name='appointment'),\n path('events/', views.events, name='events'),\n path('center/', views.cent, name='center'),\n path('logout/', views.pagelogout, name='logout'),\n # path( \"/dashboard\",\n # TemplateView.as_view(template_name=\"layouts/dashboard.html\"),\n # name=\"dashboard\",),\n # path('accounts/logout/', views.LogoutView.as_view(template_name=\"post_list.html\"), name='logout'),\n # path(\"/\", views.quiz, name=\"quiz\"), \n # path(\"quiz/\", views.quiz, name=\"quiz\"), \n # path('/data/', views.quiz_data_view, name='quiz-data'),\n # path('/save/', views.save_quiz_view, name='quiz-save'),\n]", "repo_name": "Anabella1109/Blood-donation", "sub_path": "mysite/blooddonation/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1639, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}]} {"seq_id": "9969851121", "text": "from rest_framework.views import exception_handler\nfrom rest_framework.exceptions import (\n AuthenticationFailed,\n NotFound,\n PermissionDenied,\n ValidationError,\n MethodNotAllowed\n)\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n\ndef custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n if isinstance(exc, AuthenticationFailed):\n response = Response(\n {\n 'status_code': status.HTTP_401_UNAUTHORIZED,\n 'message': 'Authentication
 failed',\n 'errors': []\n },\n status=status.HTTP_401_UNAUTHORIZED\n )\n elif isinstance(exc, ValidationError):\n response = Response(\n {\n 'status_code': status.HTTP_400_BAD_REQUEST,\n 'message': 'Bad request',\n 'errors': str(exc.detail)\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n elif isinstance(exc, PermissionDenied):\n response = Response(\n {\n 'status_code': status.HTTP_403_FORBIDDEN,\n 'message': 'Permission denied',\n 'errors': []\n },\n status=status.HTTP_403_FORBIDDEN\n )\n elif isinstance(exc, NotFound):\n response = Response(\n {\n 'status_code': status.HTTP_404_NOT_FOUND,\n 'message': 'Not found',\n 'errors': []\n },\n status=status.HTTP_404_NOT_FOUND\n )\n elif isinstance(exc, MethodNotAllowed):\n response = Response(\n {\n 'status_code': status.HTTP_405_METHOD_NOT_ALLOWED,\n 'message': 'Method not allowed',\n 'errors': []\n },\n status=status.HTTP_405_METHOD_NOT_ALLOWED\n )\n\n return response
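\n\n# Hedged note (added): DRF only uses this handler once it is registered in\n# settings; the dotted path below is inferred from this file's location and\n# may need adjusting:\n# REST_FRAMEWORK = {\n#     'EXCEPTION_HANDLER': 'utils.exception_handler.custom_exception_handler',\n# }\n", "repo_name": "QuangDat2412/base-project", "sub_path": "backend/utils/exception_handler.py", "file_name": "exception_handler.py", "file_ext": "py", "file_size_in_byte": 1893, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "rest_framework.views.exception_handler", "line_number": 14, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.AuthenticationFailed", "line_number": 15, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 18, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 22, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 24, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 28, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 28, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 32, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 32, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 34, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 37, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 37, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 41, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 41, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.NotFound", "line_number": 43, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 44, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 46, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 46, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 50, "usage_type": "attribute"}, {"api_name": "rest_framework.status",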
"line_number": 50, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.MethodNotAllowed", "line_number": 52, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 53, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_405_METHOD_NOT_ALLOWED", "line_number": 55, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 55, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_405_METHOD_NOT_ALLOWED", "line_number": 59, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 59, "usage_type": "name"}]} {"seq_id": "13085376838", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nPrediction on video using\npytorch model\n\"\"\"\n# import the necessary packages\nfrom __future__ import print_function\nimport argparse\nimport json\nfrom collections import deque\nimport torch\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport numpy as np\nimport cv2\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--model\", required=True,\n help=\"path to trained serialized model\")\nap.add_argument(\"-w\", \"--weights\", required=True,\n help=\"path to the parameters of the model\")\nap.add_argument(\"-i\", \"--input\", required=True,\n help=\"path to our input video\")\nap.add_argument(\"-l\", \"--labels\", required=True,\n help=\"path to the labels\")\nap.add_argument(\"-o\", \"--output\", required=True,\n help=\"path to our output video\")\nap.add_argument(\"-s\", \"--size\", type=int, default=1,\n help=\"size of queue for averaging\")\nargs = vars(ap.parse_args())\n\n# load dictionary of labels\nprint(\"[INFO] loading labels...\")\njson_file = open(args['labels'])\nlabel_dict = json.load(json_file)\n\n# load the trained model\nprint(\"[INFO] loading model...\")\nloc = torch.load(args[\"weights\"], map_location=device)\nmodel = torch.load(args[\"model\"], map_location=device)\nmodel.load_state_dict(loc[\"model\"])\nmodel.eval()\n\ndata_transforms = {\n 'val': transforms.Compose([transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),\n}\n\ndef predict(image, model):\n # Pass the image through our model\n image_tensor = data_transforms['val'](image).float()\n image_tensor = image_tensor.unsqueeze_(0)\n images = Variable(image_tensor)\n image_tensor = images.to(device)\n torch.no_grad()\n predict = F.softmax(model(image_tensor))\n print(predict.detach().cpu().numpy())\n return predict.cpu().detach().numpy()\n\n# initialize the image mean for mean subtraction along with the\n# predictions queue\nQ = deque(maxlen=args[\"size\"])\n\n# initialize the video stream, pointer to output video file, and\n# frame dimensions\nvs = cv2.VideoCapture(args[\"input\"])\nwriter = None\n(W, H) = (None, None)\n\n# loop over frames from the video file stream\nwhile True:\n # read the next frame from the file\n (grabbed, frame) = vs.read()\n\n # if the frame was not grabbed, then we have reached the end\n # of the stream\n if not grabbed:\n break\n\n # if the frame dimensions are empty, grab them\n if W is None or H is None:\n (H, W) = frame.shape[:2]\n\n # clone the output frame, then convert it from BGR to RGB\n # ordering, resize the frame to a fixed 224x224, and then\n # perform mean subtraction\n output = frame.copy()\n frame = cv2.cvtColor(frame, 
cv2.COLOR_BGR2RGB)\n\n # make predictions on the frame and then update the predictions\n # queue\n preds = predict(frame, model)\n Q.append(preds)\n\n # perform prediction averaging over the current history of\n # previous predictions\n results = np.array(Q).mean(axis=0)\n i = np.argmax(results)\n label = label_dict[str(i)]\n\n # draw the condition on the output frame\n text = \"{}\".format(label)\n cv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX,\n 1.25, (0, 255, 0), 5)\n\n # check if the video writer is None\n if writer is None:\n # initialize our video writer\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n writer = cv2.VideoWriter(args[\"output\"], fourcc, 30,\n (W, H), True)\n\n # write the output frame to disk\n writer.write(output)\n\n # show the output image\n cv2.imshow(\"Output\", output)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n# release the file pointers\nprint(\"[INFO] cleaning up...\")\nwriter.release()\nvs.release()\n", "repo_name": "ReconAI/AM-Traffic-Phase2.Iteration2_Task-1-Pytorch", "sub_path": "predict_video_torch.py", "file_name": "predict_video_torch.py", "file_ext": "py", "file_size_in_byte": 4050, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.device", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 18, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 21, "usage_type": "call"}, {"api_name": "json.load", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 49, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 50, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 60, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 107, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 114, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 121, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 122, "usage_type": "call"}]} {"seq_id": "74814177083", "text": 
"from roughsets.roughsets import *\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport numpy as np\r\n\r\n\r\n'''\r\nDetermina le soglie utilizzando tre metodi: OPTIMIZER, FORZA BRUTA e VISUALE (scrivo nel file rfilename le prime due coppie)\r\n'''\r\ndef determineThresholds(dtable, ncol, concept, rfilename, output, path, kind):\r\n\r\n f = open(rfilename, \"w\")\r\n f.write(\"beta,alpha\\n\")\r\n\r\n nrs = RoughSets()\r\n print(dtable)\r\n output.write(str(dtable) + '\\n')\r\n\r\n\r\n nrs.setInformationSystem(dtable, np.array([i for i in range(ncol)]))\r\n\r\n b1 = (0.0, 0.5)\r\n b2 = (0.5, 1.0)\r\n xinit = [0.3, 0.7]\r\n beta, alpha = nrs.calculateAlphaBeta(concept, b1, b2, xinit)\r\n\r\n print(\"* Optimizer\")\r\n output.write(\"* Optimizer \\n\")\r\n\r\n print(\"BETA, ALFA: \", beta, alpha)\r\n output.write(str(beta) + \",\" + str(alpha) + \"\\n\")\r\n f.write(str(beta)+\",\"+str(alpha)+\"\\n\")\r\n\r\n b = np.linspace(0.0, 0.5, 5)\r\n a = np.linspace(0.5, 1.0, 5)\r\n\r\n A, B = np.meshgrid(a, b)\r\n\r\n HM=list()\r\n for i in range(len(B)):\r\n HR = list()\r\n for j in range(len(A)):\r\n HR.append(nrs.Entropy((B[i][j], A[i][j])))\r\n HM.append(HR)\r\n\r\n print(HM)\r\n output.write(str(HM)+'\\n')\r\n\r\n ind = np.unravel_index(np.argmin(np.array(HM), axis=None), np.array(HM).shape)\r\n print(\"ARGMIN: \",ind)\r\n output.write(\"* ARGMIN: \"+str(ind)+ '\\n')\r\n\r\n print(\"MIN: \", np.min(HM))\r\n output.write(\"MIN: \"+str(np.min(HM))+'\\n')\r\n\r\n print(\"* Brute Force\")\r\n output.write(\"* Brute Force \\n\")\r\n\r\n print(\"BETA, ALFA: \", b[ind[0]], a[ind[1]])\r\n output.write(\"BETA, ALFA: \"+str([ind[0]])+','+str(a[ind[1]])+\"\\n\")\r\n\r\n f.write(str(b[ind[0]])+\",\"+str(a[ind[1]])+\"\\n\")\r\n f.close()\r\n\r\n print(\"* Visual\")\r\n output.write('* Visual\\n')\r\n ax = plt.axes(projection='3d')\r\n ax.plot_surface(A, B, np.array(HM), rstride=1, cstride=1,\r\n cmap='viridis', edgecolor='none')\r\n ax.set_title('surface')\r\n ax.set_xlabel('alpha')\r\n ax.set_ylabel('beta')\r\n ax.set_zlabel('entropy')\r\n #plt.show()\r\n plt.savefig(path+'/'+str(kind)+'.png')\r\n\r\n\r\n\r\n'''\r\nApplica il 3WD probabilistico con le soglie date e calcola l'esagono di opposizione\r\n'''\r\ndef exagonOpposition(dtable, ncol, concept, beta, alpha, output):\r\n nrs = RoughSets()\r\n nrs.setInformationSystem(dtable, np.array([i for i in range(ncol)]))\r\n\r\n regions = nrs.pcalculate3WD(concept, alpha, beta)\r\n regions = list(regions)\r\n\r\n POS = regions[0]\r\n BND = regions[1]\r\n NEG = regions[2]\r\n\r\n print(\"POS: \", POS)\r\n output.write(\"POS: \" + str(POS) + '\\n')\r\n print(\"BND: \", BND)\r\n output.write(\"BND: \" + str(BND) + '\\n')\r\n print(\"NEG: \", NEG)\r\n output.write(\"NEG: \" + str(NEG) + '\\n')\r\n regs = {\"POS\": POS, \"BND\": BND, \"NEG\": NEG}\r\n points = {\"A\": {}, \"B\": {}, \"C\": {}, \"D\": {}, \"E\": {}, \"F\": {}}\r\n\r\n points[\"A\"] = POS\r\n points[\"B\"] = NEG\r\n points[\"F\"] = BND\r\n points[\"D\"] = NEG.union(BND)\r\n points[\"C\"] = POS.union(BND)\r\n points[\"E\"] = POS.union(NEG)\r\n\r\n return regs, points\r\n\r\n\r\ndef mainNew(path,i):\r\n\r\n print(\"------------------------------\"+str(i)+\"/\"+str(i+1)+\"-------------------------------------\")\r\n with open(path+'regions.txt', 'w') as output:\r\n # Numero di attributi condizionali del primo dataset\r\n NCOL1=6\r\n\r\n data1 = pd.read_csv(path+\"decision_table_base.csv\")\r\n data1=data1.drop(columns=\"usr_index\")\r\n infoTable1 
 = data1.values\r\n\r\n # filter on the decision column (the concept under study must be changed on this line)\r\n decisions_d1 = data1[data1.decision == 2]\r\n decisions_d1 = np.array(list(decisions_d1.index))\r\n\r\n determineThresholds(infoTable1, NCOL1, decisions_d1, path+\"thresholds_step1.csv\", output, path, 'base')\r\n\r\n f=open(path+\"thresholds_step1.csv\", \"r\")\r\n lfile=list()\r\n\r\n for line in f:\r\n lfile.append(line.strip())\r\n f.close()\r\n\r\n # use only the values computed by brute force\r\n beta1 = float(lfile[2].split(\",\")[0])\r\n alpha1 = float(lfile[2].split(\",\")[1])\r\n\r\n regions1, points1 = exagonOpposition(infoTable1, NCOL1, decisions_d1, beta1, alpha1, output)\r\n\r\n for k, v in points1.items():\r\n print(\"POINT: \", k, \" VALUE: \", v)\r\n output.write(\"POINT: \"+str(k)+\" VALUE \"+str(v)+'\\n')\r\n\r\n for k, v in regions1.items():\r\n print(\"REGION: \", k, \" VALUE: \", v)\r\n output.write(\"REGION: \" + str(k) + \" VALUE \" + str(v) + '\\n')\r\n # number of conditional attributes in the second dataset\r\n NCOL2=7\r\n\r\n data2 = pd.read_csv(path+\"decision_table_t1.csv\")\r\n data2=data2.drop(columns=\"usr_index\")\r\n infoTable2 = data2.values\r\n\r\n # filter on the decision column (the concept under study must be changed on this line)\r\n decisions_d2 = data2[data2.decision == 2]\r\n decisions_d2 = np.array(list(decisions_d2.index))\r\n\r\n determineThresholds(infoTable2, NCOL2, decisions_d2, path+\"thresholds_step2.csv\", output, path, 't1')\r\n\r\n\r\n f=open(path+\"thresholds_step2.csv\", \"r\")\r\n lfile=list()\r\n\r\n for line in f:\r\n lfile.append(line.strip())\r\n f.close()\r\n\r\n # use only the values computed by brute force\r\n beta2 = float(lfile[2].split(\",\")[0])\r\n alpha2 = float(lfile[2].split(\",\")[1])\r\n\r\n regions2, points2 = exagonOpposition(infoTable2, NCOL2, decisions_d2, beta2, alpha2, output)\r\n\r\n for k, v in points2.items():\r\n print(\"POINT: \", k, \" VALUE: \", v)\r\n output.write(\"POINT: \" + str(k) + \" VALUE \" + str(v) + '\\n')\r\n\r\n for k, v in regions2.items():\r\n print(\"REGION: \", str(k), \" VALUE: \", str(v))\r\n output.write(\"REGION: \" + str(k) + \" VALUE \" + str(v) + '\\n')\r\n mu, xi = utilityMeasures((regions1, regions2), (points1, points2), ((beta1, alpha1),(beta2, alpha2)), (data1, data2), (decisions_d1, decisions_d2))\r\n\r\n print(\"MU: \", mu)\r\n output.write(\"MU: \" + str(mu) + '\\n')\r\n print(\"XI: \", xi)\r\n output.write(\"XI: \" + str(xi) + '\\n')\r\n\r\n\r\n\r\ndef communityProbability(community, concept):\r\n return len(set(community).intersection(set(concept)))/len(set(concept))\r\n
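\r\n# Added worked example (hedged, not from the original authors):\r\n# communityProbability is the share of the concept that falls inside the\r\n# community, e.g. communityProbability([1, 2, 3], [2, 3, 4]) == 2/3.\r\n\r\n \r\ndef calculateX(POS_T1, POS_T2, concept, data):\r\n data1=data[0]\r\n data2=data[1]\r\n\r\n if len(POS_T2) > len(POS_T1):\r\n probabilities = list()\r\n communities = set()\r\n irange=set(POS_T2).difference(set(POS_T1))\r\n for i in list(irange):\r\n ci=data1.iloc[i][\"community\"]\r\n communities.add(ci)\r\n for i in list(communities):\r\n datax=data1[data1.community==i]\r\n datax = list(datax.index)\r\n prob=communityProbability(datax, concept)\r\n probabilities.append(prob)\r\n mini=0\r\n if len(probabilities)!=0:\r\n mini=min(probabilities)\r\n return mini\r\n else:\r\n probabilities = list()\r\n communities = set()\r\n irange=set(POS_T1).difference(set(POS_T2))\r\n for i in list(irange):\r\n ci=data1.iloc[i][\"community\"]\r\n communities.add(ci)\r\n for i in list(communities):\r\n datax=data1[data1.community==i]\r\n datax = list(datax.index)\r\n 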
prob=communityProbability(datax, concept)\r\n probabilities.append(prob)\r\n maxi=0\r\n if len(probabilities)!=0:\r\n maxi=max(probabilities)\r\n return maxi\r\n\r\ndef calculateY(NEG_T1, NEG_T2, concept, data):\r\n data1=data[0]\r\n data2=data[1]\r\n\r\n if len(NEG_T2) > len(NEG_T1):\r\n probabilities = list()\r\n communities = set()\r\n irange=set(NEG_T2).difference(set(NEG_T1))\r\n for i in irange:\r\n ci=data1.iloc[i][\"community\"]\r\n communities.add(ci)\r\n for i in list(communities):\r\n datax=data1[data1.community==i]\r\n datax = list(datax.index)\r\n prob=communityProbability(datax, concept)\r\n probabilities.append(prob)\r\n maxi=0\r\n if len(probabilities)!=0:\r\n maxi=max(probabilities)\r\n return maxi\r\n else:\r\n probabilities = list()\r\n communities = set()\r\n irange=set(NEG_T1).difference(set(NEG_T2))\r\n for i in irange:\r\n ci=data1.iloc[i][\"community\"]\r\n communities.add(ci)\r\n for i in list(communities):\r\n datax=data1[data1.community==i]\r\n datax = list(datax.index)\r\n prob=communityProbability(datax, concept)\r\n probabilities.append(prob)\r\n mini=0\r\n if len(probabilities)!=0:\r\n mini=min(probabilities)\r\n return mini\r\n\r\ndef utilityMeasures(regions, points, thresholds, datas, concepts):\r\n regions1=regions[0]\r\n regions2=regions[1]\r\n points1=points[0]\r\n points2=points[1]\r\n beta1 = thresholds[0][0]\r\n alpha1 = thresholds[0][1]\r\n beta2 = thresholds[1][0]\r\n alpha2 = thresholds[1][1]\r\n data1 = datas[0]\r\n data2 = datas[1]\r\n concept1 = concepts[0]\r\n concept2 = concepts[1]\r\n \r\n x=calculateX(regions1[\"POS\"], regions2[\"POS\"], concept1, datas)\r\n y=calculateY(regions1[\"NEG\"], regions2[\"NEG\"], concept1, datas)\r\n\r\n mu = None\r\n xi = None\r\n if abs(alpha1-x) > 0:\r\n mu = (len(list(regions2[\"POS\"]))-len(list(regions1[\"POS\"])))/(abs(alpha1-x))\r\n if abs(y-beta1) > 0:\r\n xi = (len(list(regions2[\"NEG\"]))-len(list(regions1[\"NEG\"])))/(abs(y-beta1))\r\n \r\n return mu, xi\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n for i in range(5,23):\r\n path = 'fludata_luigi/table_pairs/'+str(i)+'-'+str(i+1)+'/'\r\n mainNew(path, i)\r\n", "repo_name": "luilom/Phd_Fact_Checking", "sub_path": "Prima Versione/script_roughsets_prof/testFlu_Luigi.py", "file_name": "testFlu_Luigi.py", "file_ext": "py", "file_size_in_byte": 9745, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, 
{"api_name": "pandas.read_csv", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "call"}]} {"seq_id": "74415442363", "text": "###### import ######\nfrom threading import Thread, Lock\nfrom enum import Enum\nimport sys\nfrom loguru import logger\nfrom pyloramac import *\nfrom ipaddress import IPv6Address, AddressValueError\nfrom scapy.all import *\nfrom time import *\n###### LOG Configuration ######\nLOG_FORMAT = \"(\"+str(perf_counter_ns())+\"){time: HH:mm:ss.SSS} | \"+\\\n \"{level: <8} |\"+\\\n \"{level.icon} {name}\"+\\\n \":{function}:{line} \"+\\\n \"- {message}\"+\\\n \"\\n{exception}\"\nLOG_CONFIG = {\n \"handlers\": [\n {\"sink\": sys.stdout, \"format\": lambda record: LOG_FORMAT, \"level\": \"INFO\"},\n #{\"sink\":\"file_{time}.log\", \"format\": lambda record: LOG_FORMAT, \"level\": \"INFO\"},\n ]\n}\n###### UDP Configuration ######\nUDP_CLIENT_PORT = 8765\nUDP_SERVER_PORT = 5678\n\nSEND_INTERVAL = 5 # s\nMAX_PAQUET_COUNT = 100\n###### MAIN APP ######\nclass Mode(Enum):\n PINGPONG = 0\n RECEIVER = 1\n SENDER = 2\n\nclass Child:\n def __init__(self, mode: Mode, first_data: IPv6):\n self.mode = mode\n self.addr = first_data.src\n self.count = 0\n self.sender = None\n \n if mode == Mode.PINGPONG:\n self.receive(first_data)\n if mode == Mode.SENDER:\n self.sender = Thread(target=self.send)\n self.sender.start()\n\n def send(self):\n while self.count < MAX_PAQUET_COUNT:\n data = IPv6(src=NETWORK_STACK.node_ip_addr, dst=self.addr)/UDP(sport=UDP_SERVER_PORT, dport=UDP_CLIENT_PORT)/Raw(load=f\"hello {self.count}\")\n logger.log(\"APP\", f\"Send {data[UDP][Raw].load.decode()} to {data.dst}\")\n NETWORK_STACK.send(data)\n sleep(SEND_INTERVAL)\n self.count += 1\n\n def receive(self, data):\n logger.log(\"APP\", f\"Receive {data[UDP][Raw].load.decode()} from {data.src}\")\n new_data = IPv6(src=NETWORK_STACK.node_ip_addr, dst=self.addr)/UDP(sport=UDP_SERVER_PORT, dport=UDP_CLIENT_PORT)/Raw(load=f\"PONG {self.count}\")\n logger.log(\"APP\", f\"send {data[UDP][Raw].load.decode()} to {new_data.dst}\")\n NETWORK_STACK.send(new_data)\n self.count +=1\n\nclass LoRaRoot:\n def __init__(self, mode:Mode):\n self.childs = {}\n self.mode = mode\n\n def init(self, port:str):\n NETWORK_STACK.init(port=port)\n NETWORK_STACK.register_listener(self.on_data)\n\n def on_data(self, data: IPv6):\n if mode == Mode.RECEIVER:\n logger.log(\"APP\", f\"Receive {data[UDP][Raw].load.decode()} from {data.src}\")\n else:\n child = self.childs.get(data.src, None)\n if child is None:\n child = Child(self.mode, data)\n self.childs[data.src] = child\n elif self.mode == Mode.PINGPONG:\n child.receive(data)\n\nif __name__ == \"__main__\":\n logger.configure(**LOG_CONFIG)\n logger.enable(\"pyloramac\")\n logger.level(\"APP\", no=26, color=\"\", icon=\"\\U0001F3D3\")\n\n if len(sys.argv) != 3:\n logger.error(\"Usage: lora-root.py \")\n sys.exit(1)\n \n port = sys.argv[1]\n mode = Mode(int(sys.argv[2]))\n logger.log(\"APP\", f\"Mode: {mode}\")\n\n try: \n LoRaRoot(mode).init(port)\n except KeyboardInterrupt:\n logger.info(\"KeyboardInterrupt\")\n logger.info(\"Exiting...\")\n sys.exit(0)\n", "repo_name": "ArnaudPalgen/ma2_memoire", "sub_path": "code/example-udp-rpl-lora/lora-root-node/lora-root.py", "file_name": "lora-root.py", "file_ext": "py", "file_size_in_byte": 3402, "program_lang": "python", "lang": "en", "doc_type": "code", 
"stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sys.stdout", "line_number": 19, "usage_type": "attribute"}, {"api_name": "enum.Enum", "line_number": 30, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 45, "usage_type": "call"}, {"api_name": "loguru.logger.log", "line_number": 51, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 51, "usage_type": "name"}, {"api_name": "loguru.logger.log", "line_number": 57, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 57, "usage_type": "name"}, {"api_name": "loguru.logger.log", "line_number": 59, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 59, "usage_type": "name"}, {"api_name": "loguru.logger.log", "line_number": 74, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 74, "usage_type": "name"}, {"api_name": "loguru.logger.configure", "line_number": 84, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 84, "usage_type": "name"}, {"api_name": "loguru.logger.enable", "line_number": 85, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 85, "usage_type": "name"}, {"api_name": "loguru.logger.level", "line_number": 86, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 86, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 88, "usage_type": "attribute"}, {"api_name": "loguru.logger.error", "line_number": 89, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 89, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 90, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 92, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 93, "usage_type": "attribute"}, {"api_name": "loguru.logger.log", "line_number": 94, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 94, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 99, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 99, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 100, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 100, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 101, "usage_type": "call"}]} {"seq_id": "27082832453", "text": "from utils.tree_util import create_binary_tree_from_array, inorder\r\n\r\n\r\ndef good_nodes(root):\r\n count = 0\r\n def util(head, max_till_now):\r\n if not head:\r\n return\r\n nonlocal count\r\n if head.val >= max_till_now:\r\n count += 1\r\n max_till_now = head.val\r\n util(head.left, max_till_now)\r\n util(head.right, max_till_now)\r\n util(root, float('-inf'))\r\n return count\r\n\r\narray = [3,1,4,3,None,1,5]\r\nroot = create_binary_tree_from_array(array)\r\nprint('--')\r\nresult = good_nodes(root)\r\nprint(result)\r\n", "repo_name": "sumitpatra6/leetcode_daily_challenges", "sub_path": "august/count_good_nodes.py", "file_name": "count_good_nodes.py", "file_ext": "py", "file_size_in_byte": 569, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "utils.tree_util.create_binary_tree_from_array", "line_number": 19, "usage_type": "call"}]} {"seq_id": "43563789812", "text": "# -*- coding: utf-8 -*-\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox\r\nfrom tkinter import simpledialog\r\n\r\nfrom tkinter.colorchooser import *\r\nfrom library.settings_window import *\r\n\r\nimport tkinter.font as 
 tkFont\r\nfrom PIL import Image, ImageTk\r\n\r\n\r\n# imports configparser and images list\r\nfrom library.read_config import *\r\n\r\nsavedFile = {1:\"\"}\r\nimgDict={}\r\n\r\n#======================================\r\n# Main Window\r\n#======================================\r\n\r\nclass BlockNote_window:\r\n\r\n def __init__(self,master,content,icons_menu):\r\n self.master = master\r\n self.content=content\r\n self.icons_menu=icons_menu\r\n\r\n # initiate style variables\r\n self.all_variable_initiation()\r\n\r\n def create_window(self):\r\n self.master = Tk()\r\n self.master.title(\"Editeur de Texte\")\r\n\r\n # set the dimensions of the screen \r\n # and where it is placed based on conteur values\r\n self.master.geometry('%dx%d+%d+%d' % (int(config_window_width), int(config_window_height), int(config_window_left), int(config_window_top)))\r\n\r\n def add_text(self):\r\n self.content = Text(self.master,padx=10,pady=5, undo=True)\r\n self.content.config(fg=str(BlockNote_window.get_config_default_font_color(self)) ,bg=str(BlockNote_window.get_config_default_bg_color(self)))\r\n self.content.config(font=(config_font_style,config_font_size))\r\n self.content.pack(expand=1,side=BOTTOM,fill='both')\r\n\r\n # import text style from configuration.ini\r\n self.change_text_style(self.weight_v, self.underline_v, self.slant_v)\r\n \r\n # justify text -- note: does not load correctly the first time - fix later\r\n self.content.tag_configure(\"%s\" % self.alignment, justify=self.alignment.lower())\r\n self.content.tag_add(\"%s\" % self.alignment, 1.0, \"end\")\r\n\r\n def generate(self):\r\n self.master.mainloop()\r\n\r\n #======================================\r\n # Some variables initiation\r\n #======================================\r\n\r\n def all_variable_initiation(self) :\r\n\r\n self.weight_v = config_weight_text\r\n self.slant_v = config_slant_text\r\n self.underline_v = eval(config_underline_text)\r\n self.alignment = config_alignment_text\r\n\r\n #======================================\r\n # BarMenu Actions\r\n #======================================\r\n\r\n # New NotePad window\r\n def nouveau(self ,*args):\r\n import os\r\n import sys\r\n # Get the path to current interpreter and run a new window\r\n os.popen(sys.executable + ' main.py')\r\n\r\n\r\n # Open an Existing file\r\n def fopen(self, *args):\r\n import os # needed for os.path.basename below; os was only imported locally in nouveau()\r\n file = self.master.filename = filedialog.askopenfilename(initialdir = \"/\",title = \"Select File\",filetypes = ((\"Text Files\",\"*.txt\"),(\"all files\",\"*.*\")))\r\n \r\n if file:\r\n self.master.title('{} - {}'.format(os.path.basename(file), \"Editeur de texte\"))\r\n self.content.delete(1.0, END)\r\n with open(file) as _file:\r\n self.content.insert(1.0, _file.read())\r\n\r\n\r\n # Save As Method\r\n def saveAs(self, *args):\r\n # create save dialog\r\n fichier=self.master.filename = filedialog.asksaveasfilename(initialdir = \"/\",title = \"Enregistrer Sous\\\r\n \",filetypes = ((\"Fichier Texte\",\"*.txt\"),(\"Tous les fichiers\",\"*.*\")))\r\n\r\n if fichier:\r\n fichier = fichier + \".txt\"\r\n \r\n savedFile[1] = fichier\r\n f = open(fichier,\"w\")\r\n s = self.content.get(\"1.0\",END)\r\n f.write(s) \r\n f.close()\r\n \r\n # Save Method \r\n def save(self, *args):\r\n if(savedFile[1] ==\"\"):\r\n self.saveAs() \r\n else:\r\n f = open(savedFile[1],\"w\")\r\n s = self.content.get(\"1.0\",END)\r\n f.write(s) \r\n f.close() \r\n\r\n # Exit Method\r\n def quitter(self):\r\n if messagebox.askokcancel(\"Are you sure?\", \"Please Confirm that you want to exit!\"):\r\n self.master.quit()\r\n\r\n
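 # Hedged aside (added): tkinter's save dialog can append the extension\r\n # itself via its defaultextension option, which would avoid the\r\n # unconditional '+ \".txt\"' in saveAs above, e.g.:\r\n #   fichier = filedialog.asksaveasfilename(defaultextension=\".txt\")\r\n\r\n #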
 Undo text changes\r\n def undo(self, event=None):\r\n try:\r\n self.content.edit_undo()\r\n except:\r\n print('Nothing to undo...')\r\n\r\n # redo text changes\r\n def redo(self, *args):\r\n try:\r\n self.content.edit_redo()\r\n except:\r\n print('Nothing to redo...')\r\n\r\n # Copy text\r\n def copy(self, *args):\r\n try:\r\n self.content.clipboard_clear()\r\n self.content.clipboard_append(self.content.selection_get())\r\n except:\r\n print('Nothing to copy...')\r\n\r\n # Cut text\r\n def cut(self, *args):\r\n try:\r\n self.copy()\r\n self.content.delete(\"sel.first\",\"sel.last\") \r\n except:\r\n print('Nothing to cut...')\r\n\r\n # Paste Text\r\n def paste(self, *args):\r\n try:\r\n self.content.insert(INSERT, self.content.clipboard_get())\r\n except:\r\n print('Nothing to paste...')\r\n\r\n # Delete Selected text\r\n def clear(self,*args):\r\n # content.get/delete raise TclError when nothing is selected, so guard like the other methods\r\n try:\r\n self.content.delete(SEL_FIRST, SEL_LAST)\r\n except:\r\n print('Nothing to clear...')\r\n\r\n # Delete All text\r\n def clearall(self,*args):\r\n try:\r\n self.content.delete(1.0 , END)\r\n except:\r\n print('Nothing to clear...')\r\n\r\n # Select All text\r\n def selectAll(self, *args):\r\n self.content.tag_add(SEL, '1.0', END)\r\n self.content.mark_set(0.0, END)\r\n self.content.see(INSERT)\r\n\r\n def left_alignment(self , *args):\r\n self.content.tag_configure(\"LEFT\",justify=LEFT)\r\n self.content.tag_add(\"LEFT\", 1.0, \"end\")\r\n\r\n conteur['text_styles']['alignment'] = \"LEFT\"\r\n conteur.write(open('configuration.ini','w'))\r\n\r\n def right_alignment(self , *args):\r\n self.content.tag_configure(\"RIGHT\",justify=RIGHT)\r\n self.content.tag_add(\"RIGHT\", 1.0, \"end\")\r\n\r\n conteur['text_styles']['alignment'] = \"RIGHT\"\r\n conteur.write(open('configuration.ini','w'))\r\n\r\n def center_alignment(self , *args):\r\n self.content.tag_configure(\"CENTER\",justify=CENTER)\r\n self.content.tag_add(\"CENTER\", 1.0, \"end\")\r\n\r\n conteur['text_styles']['alignment'] = \"CENTER\"\r\n conteur.write(open('configuration.ini','w'))\r\n\r\n def bold(self,*args):\r\n\r\n if self.weight_v ==\"bold\":\r\n self.weight_v = \"normal\"\r\n else:\r\n self.weight_v = \"bold\"\r\n self.change_text_style(self.weight_v, self.underline_v, self.slant_v)\r\n \r\n def underline(self,*args):\r\n\r\n if self.underline_v:\r\n self.underline_v = 0\r\n else:\r\n self.underline_v = 1\r\n self.change_text_style(self.weight_v, self.underline_v, self.slant_v)\r\n\r\n def italic(self,*args):\r\n if self.slant_v ==\"roman\":\r\n self.slant_v = \"italic\"\r\n else:\r\n self.slant_v = \"roman\"\r\n self.change_text_style(self.weight_v, self.underline_v, self.slant_v)\r\n\r\n def change_text_style(self, boldness, underline, slant):\r\n styling = tkFont.Font(family=config_font_style, size=config_font_size,weight= boldness,slant=slant, underline=underline)\r\n self.content.configure(font=styling)\r\n\r\n conteur['text_styles']['underline'] = str(underline)\r\n conteur['text_styles']['weight'] = boldness\r\n conteur['text_styles']['slant'] = slant\r\n conteur.write(open('configuration.ini','w'))\r\n\r\n def find(self, *args):\r\n self.content.tag_remove('found', '1.0', END)\r\n target = simpledialog.askstring('Search', 'words to search:')\r\n if target:\r\n idx = '1.0'\r\n while 1:\r\n idx = self.content.search(target, idx, nocase=1,\r\n stopindex=END)\r\n if not idx:\r\n break\r\n lastidx = '%s+%dc' % (idx, len(target))\r\n self.content.tag_add('found', idx, lastidx)\r\n idx = lastidx\r\n\r\n
 self.content.tag_config('found',foreground='white', background=\"blue\")\r\n\r\n def open_about(self, *args):\r\n about_window = Toplevel(self.master)\r\n about_window.grab_set()\r\n about_window.title(\"About Me\")\r\n about_window.geometry(\"250x150\")\r\n about_window.resizable(False, False)\r\n about_window.configure(background='white')\r\n\r\n\r\n aboutme_icon = Image.open( other_icons.get('aboutme') )\r\n aboutme_icon = aboutme_icon.resize((100,100))\r\n aboutme_img = ImageTk.PhotoImage(aboutme_icon)\r\n aboutme_label = Label(about_window, image=aboutme_img,borderwidth=0, bg=\"white\")\r\n imgDict['aboutme'] = aboutme_img \r\n aboutme_label.pack()\r\n\r\n my_name= Label(about_window, text=\"SAIFEDDINE CHAGDALI\",fg=\"black\", bg=\"white\", font=('Calibri',10))\r\n my_name.place(x=60, y=100)\r\n\r\n git_link= Label(about_window, text=\"github.com/sifdin17/TextEditor\",fg=\"blue\", bg=\"white\",font=('Calibri',10))\r\n git_link.place(x=40, y=120)\r\n\r\n\r\n\r\n #======================================\r\n # action that opens the setting window\r\n #======================================\r\n\r\n def settings_w(self, *args):\r\n\r\n # New Settings Instance\r\n preference_window = Settings_class(self.master,self.content,\"Settings_win\")\r\n # Create all widgets\r\n preference_window.create_settings_window()\r\n\r\n\r\n #===========================\r\n # BarMenu Creation\r\n #===========================\r\n\r\n def add_menu(self):\r\n # 1 - Create the menu bar\r\n menuBar = Menu(self.master)\r\n \r\n # 2 - Create the File menu\r\n global menuFichier\r\n menuFichier = Menu(menuBar,tearoff=0)\r\n menuBar.add_cascade(label = \"File\", menu=menuFichier)\r\n menuFichier.add_command(label=\"New\", command=self.nouveau)\r\n menuFichier.add_command(label=\"Open\", command=self.fopen)\r\n menuFichier.add_command(label=\"Save\", command=self.save)\r\n menuFichier.add_command(label=\"Save as\", command=self.saveAs)\r\n menuFichier.add_separator()\r\n menuFichier.add_command(label=\"Exit\", command = self.quitter) \r\n self.master.config(menu = menuBar)\r\n \r\n # 3 - Create the Edit menu\r\n global menuEdition\r\n menuEdition= Menu(menuBar,tearoff=0)\r\n menuBar.add_cascade(label = \"Edit\", menu=menuEdition)\r\n menuEdition.add_command(label=\"Undo\", command = self.undo)\r\n menuEdition.add_command(label=\"Redo\", command = self.redo)\r\n menuEdition.add_separator()\r\n menuEdition.add_command(label=\"Copy\", command=self.copy)\r\n menuEdition.add_command(label=\"Cut\", command = self.cut)\r\n menuEdition.add_command(label=\"Paste\", command=self.paste)\r\n menuEdition.add_separator()\r\n menuEdition.add_command(label=\"Delete\",command=self.clear)\r\n menuEdition.add_command(label=\"Delete All\",command=self.clearall)\r\n menuEdition.add_command(label=\"Select All\",command=self.selectAll)\r\n\r\n \r\n # Create the Tools menu\r\n menuOutils = Menu(menuBar,tearoff=0)\r\n menuBar.add_cascade(label = \"Tools\", menu = menuOutils)\r\n\r\n global show_icons_menucheck, show_shortcuts_menucheck\r\n\r\n show_icons_menucheck = IntVar()\r\n show_icons_menucheck.set(config_show_icons)\r\n menuOutils.add_checkbutton(label='Show Icons',onvalue=1, offvalue=0, variable=show_icons_menucheck, command=self.toggle_icons)\r\n\r\n show_shortcuts_menucheck = IntVar()\r\n show_shortcuts_menucheck.set(config_show_shortcuts)\r\n menuOutils.add_checkbutton(label='Show Shortcuts',onvalue=True, offvalue=False, variable=show_shortcuts_menucheck, command=self.toggle_shortcuts)\r\n\r\n\r\n
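 # Hedged aside (added): the IntVar objects above must outlive this method,\r\n # or the checkbuttons lose their state once the variables are garbage\r\n # collected; that is why the original keeps them in module-level globals.\r\n # Instance attributes (e.g. self.show_icons_menucheck) would work as well.\r\n\r\n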
 menuOutils.add_separator()\r\n menuOutils.add_command(label=\"Settings\", command=self.settings_w)\r\n \r\n # Create the Help menu\r\n menuAide = Menu(menuBar,tearoff=0)\r\n menuBar.add_cascade(label = \"Help\", menu = menuAide)\r\n menuAide.add_command(label=\"About\",command=self.open_about)\r\n\r\n if config_show_shortcuts:\r\n BlockNote_window.add_shortcuts_to_menu(self)\r\n\r\n #===========================\r\n # BarMenu Checkbuttons Action\r\n #===========================\r\n\r\n def toggle_shortcuts(self):\r\n if show_shortcuts_menucheck.get():\r\n BlockNote_window.add_shortcuts_to_menu(self)\r\n else:\r\n BlockNote_window.remove_shortcuts_from_menu(self)\r\n\r\n conteur['menu_settings']['show_shortcuts'] = str( show_shortcuts_menucheck.get() )\r\n conteur.write(open('configuration.ini','w'))\r\n\r\n def add_shortcuts_to_menu(self, *args):\r\n\r\n menuFichier.entryconfig(0, accelerator='Ctrl+N')\r\n menuFichier.entryconfig(1, accelerator='Ctrl+O')\r\n menuFichier.entryconfig(2, accelerator='Ctrl+S')\r\n\r\n menuEdition.entryconfig(0, accelerator='Ctrl+Z')\r\n menuEdition.entryconfig(1, accelerator='Ctrl+Y')\r\n menuEdition.entryconfig(3, accelerator='Ctrl+C')\r\n menuEdition.entryconfig(4, accelerator='Ctrl+X')\r\n menuEdition.entryconfig(5, accelerator='Ctrl+V')\r\n menuEdition.entryconfig(9, accelerator='Ctrl+A')\r\n\r\n # bind shortcuts to menuBar\r\n\r\n self.content.bind('<Control-n>', self.nouveau)\r\n self.content.bind('<Control-N>', self.nouveau)\r\n self.content.bind('<Control-o>', self.fopen)\r\n self.content.bind('<Control-O>', self.fopen)\r\n self.content.bind('<Control-s>', self.save)\r\n self.content.bind('<Control-S>', self.save)\r\n self.content.bind('<Control-z>', self.undo)\r\n self.content.bind('<Control-Z>', self.undo)\r\n self.content.bind('<Control-y>', self.redo)\r\n self.content.bind('<Control-Y>', self.redo)\r\n self.content.bind('<Control-c>', self.copy)\r\n self.content.bind('<Control-C>', self.copy)\r\n self.content.bind('<Control-x>', self.cut)\r\n self.content.bind('<Control-X>', self.cut)\r\n self.content.bind('<Control-v>', self.paste)\r\n self.content.bind('<Control-V>', self.paste)\r\n self.content.bind('<Control-a>', self.selectAll)\r\n self.content.bind('<Control-A>', self.selectAll)\r\n\r\n def remove_shortcuts_from_menu(self, *args):\r\n menuFichier.entryconfig(0, accelerator='')\r\n menuFichier.entryconfig(1, accelerator='')\r\n menuFichier.entryconfig(2, accelerator='')\r\n\r\n menuEdition.entryconfig(0, accelerator='')\r\n menuEdition.entryconfig(1, accelerator='')\r\n menuEdition.entryconfig(3, accelerator='')\r\n menuEdition.entryconfig(4, accelerator='')\r\n menuEdition.entryconfig(5, accelerator='')\r\n menuEdition.entryconfig(9, accelerator='')\r\n\r\n \"\"\"# unbind shortcuts from menuBar \r\n self.content.unbind('<Control-n>', self.nouveau)\r\n self.content.unbind('<Control-N>', self.nouveau)\r\n self.content.unbind('<Control-o>', self.fopen)\r\n self.content.unbind('<Control-O>', self.fopen)\r\n self.content.unbind('<Control-s>', self.save)\r\n self.content.unbind('<Control-S>', self.save)\r\n self.content.unbind('<Control-z>', self.undo)\r\n self.content.unbind('<Control-Z>', self.undo)\r\n self.content.unbind('<Control-y>', self.redo)\r\n self.content.unbind('<Control-Y>', self.redo)\r\n self.content.unbind('<Control-c>', self.copy)\r\n self.content.unbind('<Control-C>', self.copy)\r\n self.content.unbind('<Control-x>', self.cut)\r\n self.content.unbind('<Control-X>', self.cut)\r\n self.content.unbind('<Control-v>', self.paste)\r\n self.content.unbind('<Control-V>', self.paste)\r\n self.content.unbind('<Control-a>', self.selectAll)\r\n self.content.unbind('<Control-A>', self.selectAll)\"\"\"\r\n\r\n def add_icons_menu(self):\r\n self.icons_menu=Frame(height=10,borderwidth=0, padx=5, pady=0)\r\n self.icons_menu.pack(side=TOP,fill=X)\r\n\r\n def add_icons(self):\r\n i = 0\r\n for path, bind_function in
 menu_icons_list.items():\r\n\r\n load = Image.open(path)\r\n load = load.resize((16,16))\r\n img = ImageTk.PhotoImage(load)\r\n\r\n label_name = bind_function+\"_label\" # naming problem: this string is immediately overwritten below -- fix later!\r\n\r\n label_name = Label(self.icons_menu,cursor=\"hand2\", image=img)\r\n imgDict[path] = img # save image ref in imgDict -- keep track of the reference or else it won't work!!! \r\n label_name.pack(side=LEFT,padx=2,pady=5)\r\n\r\n bind_function = \"self.\"+bind_function \r\n label_name.bind( \"