diff --git "a/1300.jsonl" "b/1300.jsonl" new file mode 100644--- /dev/null +++ "b/1300.jsonl" @@ -0,0 +1,485 @@ +{"seq_id": "210882904", "text": "import os, sys\nROOT_DIR = '/home/surromind/shmoon/maskrcnn/aktwelve_mask_rcnn/'\n\nassert os.path.exists(ROOT_DIR), 'ROOT_DIR does not exist. Did you forget to read the instructions above? ;)'\nsys.path.append(ROOT_DIR) \n\nfrom mrcnn.config import Config\nimport mrcnn.utils as utils\nfrom mrcnn import visualize\nimport mrcnn.model as modellib\nimport json\n\nclass CocoLikeDataset(utils.Dataset):\n \"\"\" Generates a COCO-like dataset, i.e. an image dataset annotated in the style of the COCO dataset.\n See http://cocodataset.org/#home for more information.\n \"\"\"\n def load_data(self, annotation_json, images_dir):\n \"\"\" Load the coco-like dataset from json\n Args:\n annotation_json: The path to the coco annotations json file\n images_dir: The directory holding the images referred to by the json file\n \"\"\"\n # Load json from file\n json_file = open(annotation_json)\n coco_json = json.load(json_file)\n json_file.close()\n \n # Add the class names using the base method from utils.Dataset\n source_name = \"coco_like\"\n for category in coco_json['categories']:\n class_id = category['id']\n class_name = category['name']\n if class_id < 1:\n print('Error: Class id for \"{}\" cannot be less than one. (0 is reserved for the background)'.format(class_name))\n return\n \n self.add_class(source_name, class_id, class_name)\n \n # Get all annotations\n annotations = {}\n for annotation in coco_json['annotations']:\n image_id = annotation['image_id']\n if image_id not in annotations:\n annotations[image_id] = []\n annotations[image_id].append(annotation)\n \n # Get all images and add them to the dataset\n seen_images = {}\n for image in coco_json['images']:\n image_id = image['id']\n if image_id in seen_images:\n print(\"Warning: Skipping duplicate image id: {}\".format(image))\n else:\n seen_images[image_id] = image\n try:\n image_file_name = image['file_name']\n image_width = image['width']\n image_height = image['height']\n except KeyError as key:\n print(\"Warning: Skipping image (id: {}) with missing key: {}\".format(image_id, key))\n \n image_path = os.path.abspath(os.path.join(images_dir, image_file_name))\n image_annotations = annotations[image_id]\n \n # Add the image using the base method from utils.Dataset\n self.add_image(\n source=source_name,\n image_id=image_id,\n path=image_path,\n width=image_width,\n height=image_height,\n annotations=image_annotations\n )\n \n def load_mask(self, image_id):\n \"\"\" Load instance masks for the given image.\n MaskRCNN expects masks in the form of a bitmap [height, width, instances].\n Args:\n image_id: The id of the image to load masks for\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n image_info = self.image_info[image_id]\n annotations = image_info['annotations']\n instance_masks = []\n class_ids = []\n \n for annotation in annotations:\n class_id = annotation['category_id']\n mask = Image.new('1', (image_info['width'], image_info['height']))\n mask_draw = ImageDraw.ImageDraw(mask, '1')\n for segmentation in annotation['segmentation']:\n mask_draw.polygon(segmentation, fill=1)\n bool_array = np.array(mask) > 0\n instance_masks.append(bool_array)\n class_ids.append(class_id)\n\n mask = np.dstack(instance_masks)\n class_ids = np.array(class_ids, dtype=np.int32)\n \n return 
mask, class_ids", "sub_path": "cigar_test/cocolikedataset.py", "file_name": "cocolikedataset.py", "file_ext": "py", "file_size_in_byte": 4261, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.exists", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "mrcnn.utils.Dataset", "line_number": 13, "usage_type": "attribute"}, {"api_name": "mrcnn.utils", "line_number": 13, "usage_type": "name"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "530841267", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\nimport wagtail.wagtailcore.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),\n ('wagtailimages', '0006_add_verbose_names'),\n ('home', '0015_facultypage'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AdmissionPage',\n fields=[\n ('page_ptr', models.OneToOneField(primary_key=True, to='wagtailcore.Page', parent_link=True, auto_created=True, serialize=False)),\n ('subsection_title', models.CharField(default='title goes here', help_text='maximum length of 30 characters', max_length=30)),\n ('subsection_subtitle', models.CharField(default='subtitle goes here', help_text='maximum length of 100 characters', max_length=100)),\n ('body', wagtail.wagtailcore.fields.RichTextField(default='About admission....')),\n ('main_image', models.ForeignKey(related_name='+', to='wagtailimages.Image', on_delete=django.db.models.deletion.SET_NULL, null=True, help_text='Image size must be 750 x 300 (width x height)')),\n ],\n options={\n 'abstract': False,\n },\n bases=('wagtailcore.page',),\n ),\n ]\n", "sub_path": "source/home/migrations/0016_admissionpage.py", "file_name": "0016_admissionpage.py", "file_ext": "py", "file_size_in_byte": 1418, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.fields.wagtailcore.fields.RichTextField", "line_number": 24, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.fields.wagtailcore", "line_number": 24, "usage_type": 
"attribute"}, {"api_name": "wagtail.wagtailcore.fields", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.db", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "54125953", "text": "from keras.models import Sequential\nfrom keras.layers import Dense\nimport numpy\n# fix random seed for reproducibility\n# numpy.random.seed(7)\n\n# load pima indians dataset\ndataset = numpy.loadtxt(\"noMoreHeadaches_dummy.csv\", delimiter=\",\")\n# split into input (X) and output (Y) variables\nX = dataset[:,0:34]\nY = dataset[:,35:39]\n\n# create model\nmodel = Sequential()\nmodel.add(Dense(12, input_dim=34, activation='relu'))\nmodel.add(Dense(12, activation='relu'))\nmodel.add(Dense(8, activation='relu'))\nmodel.add(Dense(4, activation='sigmoid'))\n\nmodel.summary()\n\n# Compile model\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# Fit the model\nmodel.fit(X, Y, epochs=20, batch_size=10)\n\n# evaluate the model\nscores = model.evaluate(X, Y)\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n# predict_dataset = numpy.loadtxt(\"noMoreHeadachesPredict.csv\", delimiter=\",\")\n# print(predict_dataset.shape)\n# predict_X = predict_dataset[0:34]\n# print(predict_X.shape)\n#\n# prediction = model.predict(predict_X, 34, 0)\n\n# numpy.savetxt(\"noMoreHeadachesPredicted.csv\", prediction, delimiter=\",\")\n", "sub_path": "docs/python/noMoreHeadaches.py", "file_name": "noMoreHeadaches.py", "file_ext": "py", "file_size_in_byte": 1127, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.loadtxt", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "229716692", "text": "#!/Users/ding/miniconda3/bin/python\n\nfrom argparse import ArgumentParser as ap\n\nfrom mcmc_funs import fit_BAO\n\nif __name__ == '__main__': \n parser = ap(description='Use mcmc routine to get the BAO peak stretching parameter alpha'\\\n +', damping parameter A, amplitude parameter B.')\n parser.add_argument(\"--kmin\", help = 'kmin fit boundary.', required=True)\n parser.add_argument(\"--kmax\", help = 'kmax fit boundary.', required=True)\n parser.add_argument(\"--params_str\", help = 'Set fitting parameters. 1: free; 0: fixed.', required=True)\n parser.add_argument(\"--Pk_type\", help = \"The type of P(k) to be fitted. Pwig: wiggled P(k)\"\\\n +\"with BAO; (Pwnw: Pwig-Pnow? 
Maybe it's not necessary.\", required=True)\n args = parser.parse_args()\n fit_BAO(args)\n", "sub_path": "codes_mehdi/mcmc_fit_p0_BAO.py", "file_name": "mcmc_fit_p0_BAO.py", "file_ext": "py", "file_size_in_byte": 838, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "mcmc_funs.fit_BAO", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "379131200", "text": "import crud\nimport ctx\nimport templates\nimport json\nfrom . import hydra\n\ndef dumper(obj):\n try:\n return obj.toJSON()\n except:\n return obj.__dict__\n\n\nclass APIDocumentation(crud.CRUD):\n data = {}\n location = \"/api/vocab\"\n\n def getAll(self):\n content = templates.render({\n \"base_url\": ctx.base_url\n },\"doc.json\")\n api = json.loads(content)\n api[\"supportedClass\"] += [obj.toJSON() for obj in hydra.get_classes()]\n\n\n for classObj in hydra.get_entrypoint_classes():\n for apiClass in api[\"supportedClass\"]:\n if apiClass[\"@id\"] == \"vocab:EntryPoint\":\n apiClass[\"supportedProperty\"].append(classObj.getEntryPointDoc())\n break\n\n api = json.loads(json.dumps(api, default=dumper, indent=2))\n return ctx.success(api, 200, headers = hydra.LINK_HEADER)\n\ncrud.register_dynamic(lambda: APIDocumentation())", "sub_path": "Assignment4/resources/hydra_api/api_doc.py", "file_name": "api_doc.py", "file_ext": "py", "file_size_in_byte": 950, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "crud.CRUD", "line_number": 14, "usage_type": "attribute"}, {"api_name": "templates.render", "line_number": 19, "usage_type": "call"}, {"api_name": "ctx.base_url", "line_number": 20, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "ctx.success", "line_number": 33, "usage_type": "call"}, {"api_name": "crud.register_dynamic", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "408713689", "text": "\"\"\"--------------------------------------------------------------------------------------------------------------------------------------\nMODULE\n BrokerNoteConfirmationXMLHooks\n\nDESCRIPTION\n This module contains any hooks used to populate the confirmation XML template\n for the broker note functionality.\n\n-----------------------------------------------------------------------------------------------------------------------------------------\nHISTORY\n=========================================================================================================================================\nDate Change no Developer Requester Description\n-----------------------------------------------------------------------------------------------------------------------------------------\n2018-08-28 FAOPS-61 Stuart Wilson Capital Markets Initial Implementation.\n2020-02-10 FAOPS-725 Cuen Edwards Kgomotso Gumbo Minor refactoring.\n-----------------------------------------------------------------------------------------------------------------------------------------\n\"\"\"\n\nfrom datetime import datetime\n\nimport BrokerNoteGeneral\nfrom BrokerNoteXMLGenerator import BrokerNoteXMLGenerator, GenerateBrokerNoteXMLRequest, GenerateCustodianAccountDetails\nimport DocumentGeneral\nimport DocumentConfirmationGeneral\nfrom 
EmailBodyHTMLGenerator import EmailBodyHTMLGenerator, GenerateEmailBodyHTMLRequest\n\n\nemail_body_html_generator = EmailBodyHTMLGenerator()\nxml_generator = BrokerNoteXMLGenerator()\n\n\ndef get_email_file_name(confirmation):\n \"\"\"\n Get the file name to be given to a broker note email attachment.\n \"\"\"\n # Security name.\n instrument = confirmation.Trade().Instrument()\n security_name = instrument.Name()\n if instrument.Underlying():\n security_name = instrument.Underlying().Name()\n # Counterparty name.\n counterparty_name = DocumentGeneral.get_party_full_name_and_short_code(confirmation.Counterparty())\n # Create file name.\n file_name_template = 'Transaction Notice {security_name} {counterparty_name} {date_today}'\n file_name = file_name_template.format(\n security_name=security_name,\n counterparty_name=counterparty_name,\n date_today=datetime.today().strftime('%d%m%y')\n )\n return file_name\n\n\ndef get_email_from(confirmation):\n \"\"\"\n Get the From email address to use for delivery of a broker note.\n \"\"\"\n return DocumentConfirmationGeneral.get_default_confirmation_email_from(confirmation)\n\n\ndef get_email_to(confirmation):\n \"\"\"\n Get the To email address to use for delivery of a broker note.\n \"\"\"\n return DocumentConfirmationGeneral.get_default_confirmation_email_to(confirmation)\n\n\ndef get_email_bcc(confirmation):\n \"\"\"\n Get any email address to be BCC'ed when delivering a broker note.\n \"\"\"\n return DocumentConfirmationGeneral.get_default_confirmation_email_bcc(confirmation)\n\n\ndef get_email_subject(confirmation):\n \"\"\"\n Get the email subject to be used when delivering a broker note.\n \"\"\"\n return get_email_file_name(confirmation) + '.pdf'\n\n\ndef get_email_body(confirmation):\n \"\"\"\n Get the email body to be used when delivering a broker note.\n \"\"\"\n document_description = 'your {document_type}'\n document_description = document_description.format(\n document_type='Transaction Notice')\n request = GenerateEmailBodyHTMLRequest(\n confirmation.AcquirerContactRef().Attention(),\n confirmation.AcquirerContactRef().Telephone(),\n get_email_from(confirmation),\n document_description\n )\n return email_body_html_generator.generate_html(request)\n\n\ndef get_document_xml(confirmation):\n \"\"\"\n Create the document XML for a broker note.\n \"\"\"\n # Prevent the generation of XML for a non-broker note confirmation.\n DocumentConfirmationGeneral.validate_confirmation_for_event(confirmation, BrokerNoteGeneral\n .get_broker_note_event_name())\n custodian_details = GenerateCustodianAccountDetails(confirmation)\n request = GenerateBrokerNoteXMLRequest(confirmation, custodian_details)\n return xml_generator.generate_xml(request)\n", "sub_path": "Extensions/ABSA Documentation/FPythonCode/BrokerNoteConfirmationXMLHooks.py", "file_name": "BrokerNoteConfirmationXMLHooks.py", "file_ext": "py", "file_size_in_byte": 4227, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "EmailBodyHTMLGenerator.EmailBodyHTMLGenerator", "line_number": 28, "usage_type": "call"}, {"api_name": "BrokerNoteXMLGenerator.BrokerNoteXMLGenerator", "line_number": 29, "usage_type": "call"}, {"api_name": "DocumentGeneral.get_party_full_name_and_short_code", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "name"}, {"api_name": 
"DocumentConfirmationGeneral.get_default_confirmation_email_from", "line_number": 57, "usage_type": "call"}, {"api_name": "DocumentConfirmationGeneral.get_default_confirmation_email_to", "line_number": 64, "usage_type": "call"}, {"api_name": "DocumentConfirmationGeneral.get_default_confirmation_email_bcc", "line_number": 71, "usage_type": "call"}, {"api_name": "EmailBodyHTMLGenerator.GenerateEmailBodyHTMLRequest", "line_number": 88, "usage_type": "call"}, {"api_name": "DocumentConfirmationGeneral.validate_confirmation_for_event", "line_number": 102, "usage_type": "call"}, {"api_name": "BrokerNoteGeneral.get_broker_note_event_name", "line_number": 102, "usage_type": "call"}, {"api_name": "BrokerNoteXMLGenerator.GenerateCustodianAccountDetails", "line_number": 104, "usage_type": "call"}, {"api_name": "BrokerNoteXMLGenerator.GenerateBrokerNoteXMLRequest", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "601108195", "text": "import win32com.client # win32com 모듈 import\r\nimport psycopg2 # postgreSQL 연동 모듈 import\r\n \r\n# 연결 여부 체크\r\nobjCpCybos = win32com.client.Dispatch(\"CpUtil.CpCybos\")\r\nbConnect = objCpCybos.IsConnect\r\nprint(bConnect)\r\nif (bConnect == 0):\r\n print(\"PLUS가 정상적으로 연결되지 않음. \")\r\n exit()\r\n \r\n# 종목코드 리스트 구하기\r\nobjCpCodeMgr = win32com.client.Dispatch(\"CpUtil.CpCodeMgr\")\r\ncodeList = objCpCodeMgr.GetStockListByMarket(1) #거래소\r\ncodeList2 = objCpCodeMgr.GetStockListByMarket(2) #코스닥\r\n \r\nitem_code = {}\r\n \r\nprint(\"거래소 종목코드\", len(codeList))\r\nfor i, code in enumerate(codeList[:5]):\r\n secondCode = objCpCodeMgr.GetStockSectionKind(code)\r\n name = objCpCodeMgr.CodeToName(code)\r\n stdPrice = objCpCodeMgr.GetStockStdPrice(code)\r\n print(i, code, secondCode, stdPrice, name)\r\n \r\nprint(\"코스닥 종목코드\", len(codeList2))\r\nfor i, code in enumerate(codeList2):\r\n secondCode = objCpCodeMgr.GetStockSectionKind(code)\r\n name = objCpCodeMgr.CodeToName(code)\r\n stdPrice = objCpCodeMgr.GetStockStdPrice(code)\r\n if name in ['셀트리온', '신라젠']: # -->> 원하는 종목(기업) 이름 입력하면 됨\r\n item_code[name] = code \r\n print(i, code, secondCode, stdPrice, name)\r\n \r\n# 전체 종목코드 개수 \r\n# print(\"거래소 + 코스닥 종목코드 \",len(codeList) + len(codeList2))\r\nprint(item_code)\r\n \r\n'''결과\r\n거래소 종목코드 1363\r\n0 A000020 1 9270 동화약품\r\n1 A000030 1 17000 우리은행\r\n2 A000040 1 396 KR모터스\r\n3 A000050 1 13900 경방\r\n4 A000060 1 26300 메리츠화재\r\n코스닥 종목코드 1257\r\n603 A068270 1 178300 셀트리온\r\n1109 A215600 1 60200 신라젠\r\n{'신라젠': 'A215600', '셀트리온': 'A068270'}\r\n'''\r\n# 일자별 데이터 호출 함수\r\ndef ReqeustData(obj, name, code):\r\n # 데이터 요청\r\n obj.BlockRequest()\r\n \r\n # 통신 결과 확인\r\n rqStatus = obj.GetDibStatus()\r\n rqRet = obj.GetDibMsg1()\r\n# print(\"통신상태\", rqStatus, rqRet)\r\n if rqStatus != 0:\r\n return False\r\n \r\n # 일자별 정보 데이터 처리\r\n count = obj.GetHeaderValue(1) # 데이터 개수\r\n \r\n temp_data = []\r\n for i in range(count):\r\n date = obj.GetDataValue(0, i) # 일자\r\n open = obj.GetDataValue(1, i) # 시가\r\n high = obj.GetDataValue(2, i) # 고가\r\n low = obj.GetDataValue(3, i) # 저가\r\n close = obj.GetDataValue(4, i) # 종가\r\n diff = obj.GetDataValue(5, i) # \r\n vol = obj.GetDataValue(6, i) # 거래량\r\n \r\n year = slice(0,4)\r\n month = slice(4,6)\r\n day = slice(6,8)\r\n date = str(date)\r\n date_time = '{0}-{1}-{2}'.format(date[year], date[month], date[day])\r\n \r\n# print(date, open, high, low, close, diff, vol)\r\n stock_data.append((code, date_time, open, high, low, close, diff, vol, name))\r\n return temp_data\r\n \r\nstock_data = []\r\n\r\n# 일자별 object 구하기\r\nobjStockWeek = 
win32com.client.Dispatch(\"DsCbo1.StockWeek\")\r\n \r\nfor name, code in item_code.items():\r\n objStockWeek.SetInputValue(0, code) #종목 코드 - 셀트리온:A068270, 신라젠:A215600\r\n # 최초 데이터 요청\r\n ret = ReqeustData(objStockWeek, name, code)\r\n stock_data += ret\r\n # 연속 데이터 요청\r\n # 예제는 5번만 연속 통신 하도록 함.\r\n NextCount = 1\r\n while objStockWeek.Continue: #연속 조회처리\r\n NextCount+=1\r\n if NextCount > 6 :\r\n break\r\n ret = ReqeustData(objStockWeek, name, code)\r\n stock_data += ret\r\n if ret == False:\r\n exit()\r\n\r\n \r\n# Connect to an existing database\r\nhost = 'localhost'\r\ndbname = 'postgres'\r\nuser = 'postgres'\r\npwd = '0124'\r\nconn = psycopg2.connect('host={0} dbname={1} user={2} password={3}'.format(host, dbname, user, pwd))\r\ncur = conn.cursor()\r\n \r\n\r\n# cur.executemany(\"INSERT INTO daily_stock_price(code, date, open, high, low, close, diff, volume, name) \\\r\n# VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\", stock_data)\r\n# conn.commit()\r\n\r\n# cur.execute(\"select * from daily_stock_price\")\r\n\r\n\r\ncur.execute(\"select * from daily_stock_price\")\r\nresult = cur.fetchall()\r\n# print(result)\r\n\r\nfor row in result:\r\n print(row)\r\n", "sub_path": "copy_itemcode.py", "file_name": "copy_itemcode.py", "file_ext": "py", "file_size_in_byte": 4290, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "win32com.client.client.Dispatch", "line_number": 5, "usage_type": "call"}, {"api_name": "win32com.client.client", "line_number": 5, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 5, "usage_type": "name"}, {"api_name": "win32com.client.client.Dispatch", "line_number": 13, "usage_type": "call"}, {"api_name": "win32com.client.client", "line_number": 13, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 13, "usage_type": "name"}, {"api_name": "win32com.client.client.Dispatch", "line_number": 89, "usage_type": "call"}, {"api_name": "win32com.client.client", "line_number": 89, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 89, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "572320490", "text": "#-*-conding:utf-8-*-\n#@Time :2020/2/516:54\n#@Author:caowanwan\n#@Email:1127825076@qq.com\n#@File:do_mysql.py\nimport pymysql\nfrom API6.common.read_config import ReadConfig\nfrom API6.common import project_path\n\nclass DoMysql:\n def do_mysql(self,query,flag=1):\n '''query:表示sql查询语句\n flag:1表示查询结果有以条,2表示查询结果有 多条'''\n db_config1 =ReadConfig(project_path.config_path).get_other('DB','db_config1')#从配置文件获取数据库链接的ip等\n # cnn = pymysql.connect(host=\"test.lemonban.com\",\n # user=\"test\",\n # password=\"test\",\n # db=\"future\",\n # port=3306,\n # charset=\"utf8\",\n # cursorclass=pymysql.cursors.DictCursor)\n cnn=pymysql.connect(**db_config1)\n cursor = cnn.cursor()\n cursor.execute(query) # 查询不需要commit\n # 插入更新删除需要手动提交\n cnn.commit()\n\n if flag==1:\n res = cursor.fetchone() # 返回的元组\n #print('查询数据库结果为:{}'.format(res))\n else:\n res = cursor.fetchall() # 返回的元组嵌套元组!!((140066,), (140067,), (140068,), (140071,), (140086,), (140087,))\n #print('查询数据库结果为:{}'.format(res))\n # 4.关闭连接\n cursor.close() # 先关游标对象\n cnn.close() # 然后再关连接对象\n\n return res\n # #增删改数据库,update\n # update=\"update member set RegName='caowanwan' where id='1139812'\"\n # cursor.execute(update)\n # cursor.execute('commit')#提交\n\nif __name__=='__main__':\n query=\"select id from loan where MemberID='293366';\" 
#\"select min(id) from member where id>'1139813'# \"\n sq=DoMysql().do_mysql(query,2)\n print('查询数据库结果为:{}'.format(sq))\n print('查询数据库结果为:{}'.format(sq[0]))\n", "sub_path": "API6/common/do_pymysql.py", "file_name": "do_pymysql.py", "file_ext": "py", "file_size_in_byte": 1990, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "API6.common.read_config.ReadConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "API6.common.project_path.config_path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "API6.common.project_path", "line_number": 14, "usage_type": "name"}, {"api_name": "pymysql.connect", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "113718564", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of svgis.\n# https://github.com/fitnr/svgis\n\n# Licensed under the GNU General Public License v3 (GPLv3) license:\n# http://opensource.org/licenses/GPL-3.0\n# Copyright (c) 2016, Neil Freeman \nimport unittest\ntry:\n import xml.etree.cElementTree as ElementTree\nexcept ImportError:\n import xml.etree.ElementTree as ElementTree\n\nimport tinycss\nfrom svgis import dom\n\n\nclass DomTestCase(unittest.TestCase):\n\n parser = tinycss.make_parser()\n\n svg = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \"\"\"\n\n css = \"\"\"polygon {fill: orange;}\n .test { stroke: green; }\n polyline { stroke: blue}\n .test, #baz { stroke-width: 2; }\n #test ~ #foo { fill: purple; }\n #cat polyline { fill: red }\n .squa.tront { stroke-opacity: 0.50; }\n \"\"\"\n\n def setUp(self):\n self.rules = self.parse(self.css).rules\n\n def parse(self, css):\n return self.parser.parse_stylesheet(css)\n\n def document(self):\n return ElementTree.fromstring(self.svg).find('./{http://www.w3.org/2000/svg}g')\n\n def testApplyRule(self):\n document = self.document()\n dom.apply_rule(document, self.rules[0])\n\n polygon = document.find('.//{http://www.w3.org/2000/svg}polygon')\n\n dom.apply_rule(document, self.rules[6])\n\n polyline = document.find(\".//*[@id='meow']\")\n\n try:\n self.assertIn('fill:orange', polygon.attrib['style'])\n self.assertIn('stroke-opacity:0.50', polyline.attrib.get('style', ''))\n\n except AssertionError:\n print(ElementTree.tostring(polygon, encoding='utf-8'))\n print(ElementTree.tostring(polyline, encoding='utf-8'))\n raise\n\n def testProcessTokens(self):\n document = self.document()\n asterisk = '* { fill: tan; }'\n rule = self.parse(asterisk).rules[0]\n\n els, toks = dom._process_tokens(document, None, rule.selector)\n assert toks == []\n assert els == document.findall('.//')\n\n def testChildToken(self):\n css = \"#cat>.squa { stroke: green }\"\n rules = self.parse(css).rules\n document = self.document()\n dom.apply_rule(document, rules[0])\n polyline = document.find(\".//*[@id='meow']\")\n self.assertIn('stroke:green', polyline.attrib.get('style', ''))\n\n def testBuildTokenList(self):\n css = \"\"\"\n #foo[name=foo] {}\n .foo~.squa {}\n .pizza::first-child {}\n .salad>kale {}\n \"\"\"\n rules = self.parse(css).rules\n\n for r in rules:\n parsed = [getattr(t, 'value', '') for t in r.selector]\n built = dom._build_tokenlist(r.selector)\n self.assertEqual(parsed, [getattr(t, 'value', '') for t in built[0]])\n\n def testStyleDecoding(self):\n assert dom._style_dict('fill:none;') == {'fill': 'none'}\n self.assertEqual(dom._style_dict('fill:none; stroke : 3px ; '), {'fill': 'none', 'stroke': '3px'})\n assert dom._style_string({'fill': 'none'}) == 'fill:none'\n\nif 
__name__ == '__main__':\n unittest.main()\n", "sub_path": "tests/test_dom.py", "file_name": "test_dom.py", "file_ext": "py", "file_size_in_byte": 3823, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "unittest.TestCase", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tinycss.make_parser", "line_number": 22, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 56, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 56, "usage_type": "name"}, {"api_name": "svgis.dom.apply_rule", "line_number": 60, "usage_type": "call"}, {"api_name": "svgis.dom", "line_number": 60, "usage_type": "name"}, {"api_name": "svgis.dom.apply_rule", "line_number": 64, "usage_type": "call"}, {"api_name": "svgis.dom", "line_number": 64, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.tostring", "line_number": 73, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 73, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.tostring", "line_number": 74, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 74, "usage_type": "name"}, {"api_name": "svgis.dom._process_tokens", "line_number": 82, "usage_type": "call"}, {"api_name": "svgis.dom", "line_number": 82, "usage_type": "name"}, {"api_name": "svgis.dom.apply_rule", "line_number": 90, "usage_type": "call"}, {"api_name": "svgis.dom", "line_number": 90, "usage_type": "name"}, {"api_name": "svgis.dom._build_tokenlist", "line_number": 105, "usage_type": "call"}, {"api_name": "svgis.dom", "line_number": 105, "usage_type": "name"}, {"api_name": "svgis.dom._style_dict", "line_number": 109, "usage_type": "call"}, {"api_name": "svgis.dom", "line_number": 109, "usage_type": "name"}, {"api_name": "svgis.dom._style_dict", "line_number": 110, "usage_type": "call"}, {"api_name": "svgis.dom", "line_number": 110, "usage_type": "name"}, {"api_name": "svgis.dom._style_string", "line_number": 111, "usage_type": "call"}, {"api_name": "svgis.dom", "line_number": 111, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "448249493", "text": "# maputils.py - some utilities to aid with map-drawing\nimport cairo\n\n# convert-point - convert a point from geographic to canvas coordinates\n# bounds is the geographic extent of the canvas in the format\n# of (long_1, long_2, lat_1, lat_2)\n# pt is (lat, long).\n# returns (x,y) coordinates where x and y are between 0 and 1.\ndef convert_point(bounds, pt):\n (long_1, long_2, lat_1, lat_2) = bounds\n x = (pt[1] - long_1) / (long_2 - long_1)\n y = (pt[0] - lat_1) / (lat_2 - lat_1)\n return (x, y)\n\n# init_canvas - initialize a cairo canvas and context\n# The canvas is of size (w, h), and is optionally cleared to\n# the given color.\n# The transform matrix is set up such that coordinates range\n# from 0 to 1 in each dimension.\ndef init_canvas(w, h, clearcolor = None):\n surf = cairo.ImageSurface(cairo.FORMAT_RGB24, w, h)\n ctx = cairo.Context(surf)\n matrix = cairo.Matrix(xx = w, yy = -h, x0 = 0.0, y0 = h)\n ctx.set_matrix(matrix)\n ctx.set_line_width(0.001)\n\n if clearcolor:\n ctx.move_to(0, 0)\n ctx.line_to(0, 1)\n ctx.line_to(1, 1)\n ctx.line_to(1, 0)\n ctx.close_path()\n\n ctx.set_source_rgb(*clearcolor)\n ctx.fill()\n\n return (ctx, surf)\n", "sub_path": "maputils.py", "file_name": "maputils.py", "file_ext": "py", "file_size_in_byte": 1213, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "cairo.ImageSurface", "line_number": 21, "usage_type": "call"}, {"api_name": "cairo.FORMAT_RGB24", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cairo.Context", "line_number": 22, "usage_type": "call"}, {"api_name": "cairo.Matrix", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "403263384", "text": "# Implemented by Emmanuel Jojoe Ainoo (LOGISTICS REGRESSION CLASSFIER)\n\nimport sys\nimport sklearn\nfrom sklearn.datasets import load_files\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix\n\n\n# Initializing and Managing Datasets\nprint(\"---> Initializing and Managing Datasets\")\nprint(\" \")\n\ndef ManageTrainData(trainDoc):\n data = []\n with open(trainDoc) as f:\n for doc in f:\n data.append(doc)\n return data\n\ndef ManageTrainClass(trainClass):\n data_labels = []\n with open(trainClass) as file:\n for c in file:\n data_labels.append(c)\n return data_labels\n\n# Vectorizing Data for Normalized\nprint(\"---> Transforming/Vectorizing Data(Normalized Version) \")\nprint(\" \")\n#Function to Transform data into into counts and normalize it\ndef VectorizeNorm(data):\n vectorizer = CountVectorizer(\n analyzer = 'word',\n lowercase = True,\n )\n features = vectorizer.fit_transform(data)\n features_nd = features.toarray()\n\n # Convert raw frequency counts into TF-IDF values\n tfidf_transformer = TfidfTransformer()\n sent_tfidf = tfidf_transformer.fit_transform(features).toarray()\n return[features,features_nd,tfidf_transformer,sent_tfidf,vectorizer]\n#\n# Spliting Data for Training and Testing\nprint(\"---> Spliting Data for Training and Testing \")\nprint(\" \")\n\ndef Split(features_nd,data_labels):\n X_train, X_test, y_train, y_test = train_test_split(\n features_nd,\n data_labels,\n train_size=0.8,\n random_state=1234)\n return[X_train, X_test, y_train, y_test]\n\n# Train a Logistics Regression classifier\nprint(\"---> Training Logistics Regression Classifier \")\nprint(\" \")\n\ndef LogisticsRegressionTrainer(X_train, y_train):\n log_model = LogisticRegression()\n log_model = log_model.fit(X=X_train, y=y_train)# Call Train Data on Naive Bayes\n return log_model\n\n# Predicting the Test set results, find accuracy\nprint(\" \")\nprint(\"---> Predicting Test set Results \")\ndef PredictResults(model,X_test,y_test):\n y_pred = model.predict(X_test)\n sklearn.metrics.accuracy_score(y_test, y_pred)\n evaluate = sklearn.metrics.accuracy_score(y_test, y_pred)\n return [y_pred,evaluate]\n\n# #Function to evaluate the Models\ndef Evaluate(evaluate):\n return evaluate\n\n\ndef main():\n print(\"Starting\")\n\ndef TestClassifer(testfile,vectorizer,tfidf_transformer,model):\n reviews_new = []\n with open(testfile) as f:\n for i in f:\n reviews_new.append(i[:-1])\n\n reviews_new_counts = vectorizer.transform(reviews_new)\n reviews_new_tfidf = tfidf_transformer.transform(reviews_new_counts)\n\n # Have classifier make a prediction\n print(\" \")\n print(\"----> Making Prediction\")\n pred = model.predict(reviews_new_tfidf)\n return pred\n\n\ndef MakePrediction(file):\n traindoc = \"../FAQs/Questions.txt\"\n trainClass = \"../FAQs/Topics.txt\"\n\n data = ManageTrainData(traindoc)\n 
classes = ManageTrainClass(trainClass)\n vector = VectorizeNorm(data)\n\n split = Split(vector[1],classes)\n trainLRModel = LogisticsRegressionTrainer(split[0], split[2])\n predLR = PredictResults(trainLRModel,split[1],split[3])\n # logEv = Evaluate(predLR[1])\n # print(logEv)\n\n logTest = TestClassifer(file,vector[4],vector[2],trainLRModel)\n To = open(\"topic_results.txt\",\"a\")\n To.write(\" ------------------> Topic Modelling using Logistic Regression <------------------------------ \\n\")\n #To.write(\"Using Naive bayes \\n\")\n for i in logTest:\n To.write(i+\" \\n\")\n To.close()\n", "sub_path": "Topic Modelling and Question & Answering Using NLP/MAIN FOLDER/LogisticsRModel.py", "file_name": "LogisticsRModel.py", "file_ext": "py", "file_size_in_byte": 3844, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 76, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 77, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 77, "usage_type": "attribute"}]} +{"seq_id": "388391927", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('',views.home,name='home'),\n \n path('welcome/',views.welcome,name='welcome'),\n \n path('result/',views.result,name='result'),\n path('video/',views.video,name='video'),\n path('live/',views.live,name='live'),\n \n path('boardexams/',views.boardexams,name='boardexams'),\n path('schoolexams/',views.schoolexams,name='schoolexams'),\n path('jeemain/',views.jeemain,name='jeemain'),\n path('jeeadvance/',views.jeeadvance,name='jeeadvance'),\n path('neet/',views.neet,name='neet'),\n path('previousyear/',views.previousyear,name='previousyear'),\n \n path('book/',views.book,name='book'),\n path('book1/',views.book1,name='book1'),\n path('ncert/',views.ncert,name='ncert'),\n path('chapter1/',views.chapter1,name='chapter1'),\n path('exercise/',views.exercise,name='exercise'),\n path('question///',views.question,name='question' ),\n \n path('course/',views.course,name='course'),\n path('join/',views.join,name='join'),\n \n path('board/',views.board,name='board'),\n \n path('profile/',views.profile,name='profile'),\n \n path('drop/',views.drop,name='drop'),\n \n]", "sub_path": "Main/Application/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1241, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": 
"call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "193471314", "text": "# Author: Guoyu Wu\r\n# Description: This program uses the folium package to visualize data.\r\n# It defines five functions to display a choropleth map of crime data\r\n# and add apartment data as markers\r\n\r\n\r\nimport pandas as pd\r\nimport folium # a useful library for visualization\r\n\r\n\r\n# This function initiates a blank map\r\n# parameter: [latitude, longitude] of the center point\r\n# return the blank map\r\ndef initiate_map(center_location):\r\n pit_map = folium.Map(location = center_location, zoom_start = 12)\r\n return pit_map\r\n\r\n\r\n# This function draws the boundary lines of councils\r\n# parameter1: boundary_data should be a GeoJson formatted file\r\n# parameter2: pit_map is the map object you want to draw on;\r\ndef draw_boundary(boundary_data, pit_map):\r\n# import json\r\n# import requests\r\n folium.GeoJson(\r\n data = boundary_data,\r\n # specify the boundary styles\r\n style_function=lambda feature: {\r\n 'fillColor': '#ffff00', #fill with yellow\r\n 'color': 'black', #line color\r\n 'weight': 2, #line weight\r\n 'dashArray': '5, 5' #line style\r\n }\r\n ).add_to(pit_map)\r\n return pit_map\r\n \r\n\r\n# This function counts the number of crimes in each council area\r\n# parameter: the raw crime data CSV file\r\n# return a crime_stats dataframe\r\ndef crime_stat(crime_data):\r\n crime_df = pd.read_csv(crime_data)\r\n crime_df = crime_df.dropna() # remove rows containing NA\r\n crime_df = crime_df.iloc[-365:,:] # select the data for the past 365 days\r\n \r\n crime_stats = pd.DataFrame(crime_df['COUNCIL_DISTRICT'].value_counts())\r\n crime_stats.reset_index(inplace=True)\r\n crime_stats.rename(columns={'index':'Council_ID','COUNCIL_DISTRICT':'Count'},inplace=True)\r\n return crime_stats\r\n\r\n\r\n# This function draws a choropleth map of crime data\r\n# parameter1: boundary_data should be a GeoJson formatted file;\r\n# parameter2: crime_stats is a dataframe indicating how dark the color of an area should be\r\n# parameter3: pit_map is the map object you want to draw on\r\ndef map_crime(boundary_data, crime_stats, pit_map):\r\n\r\n folium.Choropleth(\r\n geo_data = boundary_data, \r\n data = crime_stats,\r\n columns=['Council_ID','Count'],\r\n key_on='feature.properties.council', #variable in the GeoJson file to bind the data to\r\n \r\n fill_color='YlOrRd', # \"red\"\r\n fill_opacity=0.5,\r\n line_opacity=0.2,\r\n 
highlight=True,\r\n legend_name='Crime Counts in Pittsburgh (for the past 365 days)'\r\n \r\n ).add_to(pit_map)\r\n\r\n # mark the location of Hamburgh Hall\r\n # Hamburg Hall coordinates are obtained by find_lat_lng(address) function in find_apt.py\r\n hbh_location = [40.4443494, -79.9455454]\r\n folium.CircleMarker(hbh_location, radius=15, popup='HBH 1006', \r\n color='green', fill=True, \r\n fill_color='green',fill_opacity=1).add_to(pit_map)\r\n return pit_map\r\n\r\n\r\n# This function processes the raw apartment data to lists\r\n# parameter: the raw apartment data CSV file\r\n# return three lists\r\ndef apt_process(apt_data):\r\n apt_lat = list(apt_data['lat'])\r\n apt_lng = list(apt_data['lng'])\r\n \r\n # create pop-up info dataframe\r\n labels = list()\r\n for i in range(len(apt_data['lat'])):\r\n title = apt_data['Title'].iloc[i]\r\n unit = apt_data['Unit'].iloc[i]\r\n rent = apt_data['Rent'].iloc[i]\r\n phone = apt_data['Phone'].iloc[i]\r\n df = pd.DataFrame(data=[[title,unit,rent,phone]], \r\n columns=['Title','Room Types','Rent Range','Tel'])\r\n labels.append(df)\r\n \r\n return apt_lat, apt_lng, labels\r\n\r\n\r\n# This function adds apartment markers and their associated pop-up info\r\n# parameter1,2: latitude and longitude lists of the apartment;\r\n# parameter3: a list of dataframes;\r\n# parameter4: pit_map is the map object you want to draw on\r\n# return the new map object\r\ndef add_apt_marker(apt_lat, apt_lng, labels, pit_map):\r\n\r\n for lat, lng, df in zip(apt_lat, apt_lng, labels):\r\n # add pop-up info dataframe to each apartment marker\r\n html = df.to_html(classes='table table-striped table-hover table-condensed table-responsive')\r\n popup = folium.Popup(html)\r\n # create markers\r\n folium.Marker([lat, lng], popup=popup, \r\n icon = folium.Icon(color='blue',icon='ok-sign')\r\n ).add_to(pit_map)\r\n \r\n return pit_map\r\n\r\n\r\n\r\n# The main function \r\n# parameter1: the raw apartment data CSV file;\r\n# parameter2: a list of dataframes;\r\n# return the new map object\r\ndef main(apt_data, html_savefile):\r\n # initiate a blank map with HBH in the center\r\n hbh_location = [40.4443494, -79.9455454]\r\n pit_map = initiate_map(hbh_location)\r\n \r\n # draw the boundary lines of the nine councils in Pittsburgh\r\n pit_boundary = 'PGH_CityCouncil.geojson'\r\n pit_map = draw_boundary(pit_boundary,pit_map)\r\n\r\n # process the raw data\r\n crime_data = 'Pittsburgh Police Arrest Data.csv'\r\n crime_stats = crime_stat(crime_data)\r\n apt_lat,apt_lng,labels = apt_process(apt_data)\r\n \r\n # add crime data and apartment information to the blank map\r\n pit_map = map_crime(pit_boundary, crime_stats, pit_map)\r\n pit_map = add_apt_marker(apt_lat, apt_lng, labels, pit_map)\r\n\r\n # save map to html format\r\n pit_map.save(html_savefile)\r\n \r\n # display the map (for later use in Use Case 2)\r\n from IPython.display import display\r\n display(pit_map)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n apt = 'apt_data.csv'# output file from find_apt.py\r\n apt_data = pd.read_csv(apt)\r\n main(apt_data,'pit_map.html')\r\n\r\n\r\n", "sub_path": "Use_Case_2_Recommendation/map_apt.py", "file_name": "map_apt.py", "file_ext": "py", "file_size_in_byte": 5864, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "folium.Map", "line_number": 15, "usage_type": "call"}, {"api_name": "folium.GeoJson", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 42, "usage_type": 
"call"}, {"api_name": "pandas.DataFrame", "line_number": 46, "usage_type": "call"}, {"api_name": "folium.Choropleth", "line_number": 58, "usage_type": "call"}, {"api_name": "folium.CircleMarker", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 95, "usage_type": "call"}, {"api_name": "folium.Popup", "line_number": 112, "usage_type": "call"}, {"api_name": "folium.Marker", "line_number": 114, "usage_type": "call"}, {"api_name": "folium.Icon", "line_number": 115, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 149, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "535877496", "text": "\"\"\"Example DAG demonstrating the usage of the XComArgs.\"\"\"\nimport logging\n\nfrom airflow import DAG\nfrom airflow.operators.bash import BashOperator\nfrom airflow.operators.python import PythonOperator, get_current_context, task\nfrom airflow.utils.dates import days_ago\n\nlog = logging.getLogger(__name__)\n\n\ndef generate_value():\n \"\"\"Dummy function\"\"\"\n return \"Bring me a shrubbery!\"\n\n\n@task()\ndef print_value(value):\n \"\"\"\n able to get context\n \"\"\"\n ctx = get_current_context()\n log.info(\"The knights of Ni say: %s (at %s)\", value, ctx['ts'])\n\n\nwith DAG(\n dag_id='example_xcom_args',\n default_args={'owner': 'airflow'},\n start_date=days_ago(2),\n schedule_interval=None,\n tags=['example'],\n) as dag:\n task1 = PythonOperator(\n task_id='generate_value',\n python_callable=generate_value,\n )\n\n print_value(task1.output)\n\nwith DAG(\n \"example_xcom_args_with_operators\",\n default_args={'owner': 'airflow'},\n start_date=days_ago(2),\n schedule_interval=None,\n tags=['example'],\n) as dag2:\n bash_op1 = BashOperator(task_id=\"c\", bash_command=\"echo c\")\n bash_op2 = BashOperator(task_id=\"d\", bash_command=\"echo c\")\n xcom_args_a = print_value(\"first!\")\n xcom_args_b = print_value(\"second!\")\n\n bash_op1 >> xcom_args_a >> xcom_args_b >> bash_op2\n", "sub_path": "common/taskflow_with_operator_in_dag_instance.py", "file_name": "taskflow_with_operator_in_dag_instance.py", "file_ext": "py", "file_size_in_byte": 1358, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "airflow.operators.python.get_current_context", "line_number": 22, "usage_type": "call"}, {"api_name": "airflow.operators.python.task", "line_number": 17, "usage_type": "call"}, {"api_name": "airflow.DAG", "line_number": 26, "usage_type": "call"}, {"api_name": "airflow.utils.dates.days_ago", "line_number": 29, "usage_type": "call"}, {"api_name": "airflow.operators.python.PythonOperator", "line_number": 33, "usage_type": "call"}, {"api_name": "airflow.DAG", "line_number": 40, "usage_type": "call"}, {"api_name": "airflow.utils.dates.days_ago", "line_number": 43, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 47, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "560179203", "text": "# coding=utf-8\nimport os\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport pandas as pd\nimport datetime, time\nfrom sqlalchemy import create_engine\nimport openpyxl\n\n# 文件路径(可以是具体文件或者目录)\nfilepath = r'C:\\Users\\49921\\Desktop\\load'\nprint('读取 ' + filepath + ' 目录下所有文件')\n\n# 连接MySQL数据库\nengine = create_engine(\n 
\"mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8mb4\".format('root', 'taia@2021', '172.22.5.11', '3306', 'flink'))\nprint('连接数据库成功')\n\n# 导入到mysql之后的表名\ntablename = 'userbehavior_test'\n# 英文列名\ncolumns_name = ['user_id', 'item_id', 'behavior_type', 'user_geohash', 'item_category', 'time']\n\ncolumns_dtype = {'用户ID': str, '商品ID': str, '行为': str, '用户地理位置': str, '商品分类': str, '时间': str}\n\n# 读取chunksize为10万\nchunksize = 100000\n\nstart_time = datetime.datetime.now()\n\n# 读取目录下所有文件\nfiles = os.listdir(filepath)\nfor filename in files:\n total_records = 0\n print('读取文件 ' + filename)\n if (filename[-4:] == '.csv'):\n # 使用chunksize分块读取大型csv文件,这里每次读取chunksize为1万\n df_chunk = pd.read_csv(filepath + '\\\\' + filename, chunksize=chunksize,\n iterator=True, dtype=columns_dtype)\n df_chunk.columns = columns_name\n # 读取csv文件\n for chunk in df_chunk:\n start_time_temp = datetime.datetime.now()\n chunk_list = []\n chunk_list.append(chunk)\n total_records = total_records + len(chunk)\n df_concat = pd.concat(chunk_list) # 再把这些块组合成一个DataFrame\n # 写入数据库 , append :如果表存在,则将数据添加到这个表的后面 、fail:如果表存在就不操作、replace:如果存在表,删了,重建\n df_concat.to_sql(tablename, con=engine, if_exists='append', index=False, chunksize=None)\n end_time_temp = datetime.datetime.now()\n duration_str = time.strftime(\"%H:%M:%S\", time.gmtime((end_time_temp - start_time_temp).total_seconds()))\n print('本批写入%s条数据成功 , 累计写入%s条数据, duration: %s' % (len(chunk), total_records, duration_str))\n elif (filename[-5:] == '.xlsx'):\n # 读取xlsx文件\n # The first row is the header. We have already read it, so we skip it.\n skiprows = 1\n while True:\n start_time_temp = datetime.datetime.now()\n # 使用chunksize分块读取大型xlsx文件,这里每次读取chunksize为1万 sheet_name='Sheet1',\n df_chunk = pd.read_excel(filepath + '\\\\' + filename,\n nrows=chunksize, skiprows=skiprows, header=None)\n if len(df_chunk.columns) > 0:\n df_chunk.columns = columns_name\n skiprows += chunksize\n # When there is no data, we know we can break out of the loop.\n if not df_chunk.shape[0] or df_chunk.shape[0] == 0:\n break\n total_records = total_records + len(df_chunk)\n columns = {i: col for i, col in enumerate(columns_name)}\n df_chunk.to_sql(tablename, con=engine, if_exists='append', index=False)\n end_time_temp = datetime.datetime.now()\n duration_str = time.strftime(\"%H:%M:%S\", time.gmtime((end_time_temp - start_time_temp).total_seconds()))\n print('本批写入%s条数据成功 , 累计写入%s条数据, duration: %s' % (len(df_chunk), total_records, duration_str))\n\nend_time = datetime.datetime.now()\nduration_str = time.strftime(\"%H:%M:%S\", time.gmtime((end_time - start_time).total_seconds()))\nprint(\"start_time: %s , end_time: %s , 数据同步全部完成共用时 duration: %s\" % (start_time, end_time, duration_str))\n", "sub_path": "Spark/pysparkpython27/load/CsvLoader.py", "file_name": "CsvLoader.py", "file_ext": "py", "file_size_in_byte": 3833, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.setdefaultencoding", "line_number": 6, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, 
"usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 53, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 74, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 78, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "293407969", "text": "from gym import Env\nfrom gym.spaces import Discrete, Box\n\nfrom state import *\nimport tensorflow as tf\nimport numpy as np\nimport random\n\nINVALID_ACTION_REWARD = -10\nVALID_ACTION_REWARD = 10\nWIN_REWARD = 100\nLOSS_REWARD = -100\nEAT_TOKEN = 10\n\nclass RoPaSci360(Env):\n def __init__(self,\n player = 'upper',\n opponent = 'random',\n log = 'True'):\n \n # Constants\n self.max_turns = 360\n self.log = log\n \n #\n # Observation + Action spaces\n # ---------------------------\n # Observations: RoPaSci board containing 61 hexes, with 9 types of maximum number of tokens for each player.\n # Actions: (Every board position) * (Every board position)\n #\n # Note: not every action is legal\n #\n self.action_space = Discrete(61 * 61)\n self.observation_space = Box(-9, 9, 61)\n \n self.player = player\n self.opponent = opponent\n \n # reset and build state\n self.reset()\n \n def reset(self):\n self.state = GameState()\n self.state.turn_number = 0\n self.state.game_state = 'running'\n self.state.upper_inv = 0\n self.state.lower_inv = 0\n \n self.upper = list()\n self.lower = list()\n self.upper_throws = 9\n self.upper_throws = 9\n \n self.done = False\n \n return self.state\n \n def step(self, action):\n assert self.action_space.contains(action), \"ACTION ERROR {}\".format(action)\n \n if action not in self.state._actions(self.player):\n reward = INVALID_ACTION_REWARD\n return self.state, reward, self.done, self.info\n \n if self.done:\n return (self.state, 0.0, True, self.info)\n if self.move_count > MAX_TURNS:\n return (self.state, 0.0, True, self.info)\n \n reward = VALID_ACTION_REWARD\n def render(self):\n pass\n ", "sub_path": "Part B/ADDITIONAL_PYLON/.ipynb_checkpoints/rps_env-checkpoint.py", "file_name": "rps_env-checkpoint.py", "file_ext": "py", "file_size_in_byte": 2014, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "gym.Env", "line_number": 15, "usage_type": "name"}, {"api_name": "gym.spaces.Discrete", "line_number": 33, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "418549892", "text": "from django.utils.translation import ugettext as _\n\n\nEDUCATION = (\n ('None', _('None')),\n 
('non_formal', _('Non Formal')),\n ('primary', _('Primary')),\n ('secondary', _('Secondary ')),\n ('tertiary', _('Tertiary (Higher than secondary, such as vocational college or university)')),\n)\n\nEMPLOYMENT = (\n ('full_time', _('Full-time employed')),\n ('part_time', _('Part-time employed')),\n ('seasonal', _('Seasonal or intermittent employment')),\n ('informal', _('Informal self-employment')),\n ('student', _('Student')),\n ('retired', _('Retired')),\n ('not_woking', _('Not working (non-student, not retired)')),\n ('not_answering', _('Don\\'t want to answer')),\n)\n\nMARITAL_STATUS = (\n ('single', _('Single/Never married')),\n ('cohabiting', _('Cohabitating')),\n ('married', _('Married ')),\n ('divorced', _('Divorced or formally separated')),\n ('widowed', _('Widowed')),\n ('not_answering', _('Don\\'t want to answer')),\n)\n\nALCOHOL_INTAKE = (\n ('NEVER', _('Never')),\n ('monthly', _('Monthly or less')),\n ('per_month', _('2-4 times per month')),\n ('per_week', _('2-3 times per week')),\n ('more_times', _('4 or more times per week')),\n)\n\n\nPARTNER_HIV_STATUS = (\n ('positive', 'HIV Positive'),\n ('negative', 'HIV Negative'),\n ('not_sure', 'I am not sure'),\n ('declined', 'Decline to answer'),\n)\n\n\nRELATIONSHIP_TYPE = (\n ('spouse', 'Spouse (husband/wife)'),\n ('cohabiting', 'Cohabitating partner'),\n ('boy_girl_friend', 'Boyfriend/Girlfriend'),\n ('casual', 'Casual (known) sex partner'),\n ('partner_unknown', 'One time partner (previously unknown)'),\n ('sex_worker', 'Commercial sex worker'),\n ('OTHER', 'Other, specify'),\n ('declined', 'Decline to answer'),\n)\n\n\nTESTING_CENTRE = (\n ('tvct_inside_comunity', _('TVCT in this community')),\n ('tvct_outside_community', _('TVCT outside of this community')),\n ('public', _('Public Health Facility')),\n ('private', _('Private Health Facility')),\n ('door_to_door', _('Door to door projects')),\n ('other_vct_site', _('Other VCT site')),\n ('OTHER', _('Other')),\n ('dont_remember', _('Don\\'t remember')),\n)\n\nYES_NO_DECLINED = (\n ('Yes', _('Yes')),\n ('No', _('No')),\n ('DECLINED', _('Declined to answer')),\n)\n\n\nREASON_NOT_TESTING = (\n ('know_status', _('I already know I am HIV positive.')),\n ('not_risk', _('I don\\'t believe I am at risk of getting HIV.')),\n ('afraid', _('I am afraid to find out the result.')),\n ('perception', _('I am afraid of what others would think of me.')),\n ('prohibited', _('Friends/Family did not want me to get an HIV test.')),\n ('time_work', _('I did not have time due to work.')),\n ('time_family', _('I didn\\'t have time due to family obligations')),\n ('partner_refused', _('My sexual partner didn\\'t want me to get an HIV test')),\n ('not_sure', _('I am not sure')),\n ('decline', _('Decline to answer')),\n)\n\nSYMPTOMS = (\n ('cough', _('Cough > 2 weeks')),\n ('fever', _('Fever > 2 weeks')),\n ('big_lymph', _('Enlarged lymph nodes')),\n ('cough_blood', _('Coughing up blood')),\n ('night_sweats', _('Night Sweats')),\n ('weight_loss', _('Unexplained weight loss')),\n ('none', _('None of the above symptoms reported')),\n)\n\nREFERRED_FOR = (\n ('circumcision', _('Circumcision')),\n ('cervical_screen', _('Cervical Screening')),\n ('sti_screen', _('STI screening')),\n ('family_plan', _('Family Planning')),\n ('tb_screen', _('TB screening')),\n ('couple_test', _('Couple Testing')),\n ('pmtct', _('PMTCT')),\n ('hiv_care', _('HIV Care and Treatment')),\n ('counselling', _('Supportive Counseling')),\n ('social_welfare', _('Psycho social support/Social Welfare')),\n)\n\nREFERRED_TO = (\n 
('health_facility', _('Public/Private Health Facility')),\n ('religious', _('Religious Institution')),\n ('plwh', _('PLWH/A Association')),\n ('social_welfare', _('Social Welfare facilities')),\n ('youth_friendly', _('Youth Friendly Services')),\n)\n", "sub_path": "bhp066/apps/bcpp_htc_subject/choices.py", "file_name": "choices.py", "file_ext": "py", "file_size_in_byte": 3997, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.utils.translation.ugettext", "line_number": 5, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 6, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 7, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 8, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 9, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 13, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 14, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 15, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 16, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 17, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 18, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 19, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 20, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 24, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 25, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 26, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 27, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 28, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 29, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 33, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 34, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 35, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 36, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 37, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 62, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 63, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 64, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 65, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 66, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 67, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 68, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 69, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 73, "usage_type": "call"}, {"api_name": 
"django.utils.translation.ugettext", "line_number": 74, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 75, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 80, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 81, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 82, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 83, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 84, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 85, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 86, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 87, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 88, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 89, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 93, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 94, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 95, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 96, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 97, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 98, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 99, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 103, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 104, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 105, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 106, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 107, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 108, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 109, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 110, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 111, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 112, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 116, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 117, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 118, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 119, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "320805811", "text": "\nfrom jaratoolbox import behavioranalysis\nfrom jaratoolbox import loadbehavior\nimport numpy as np\nfrom pylab import *\nimport matplotlib.pyplot as plt\n\nsubjects = ['hm4d003', 'hm4d004', 'hm4d005', 'hm4d006']\nsession = '20140820a'\n\nfor ind, subject in enumerate(subjects):\n fname=loadbehavior.path_to_behavior_data(subject,'nick','2afc',session)\n\n\n 
bdata=loadbehavior.BehaviorData(fname)\n    from jaratoolbox import settings \n\n    targetFrequency=bdata['targetFrequency']\n    valid=bdata['valid']\n    choice=bdata['choice']\n    intensities=bdata['targetIntensity']\n    choiceRight = choice==bdata.labels['choice']['right']\n\n\n    possibleFreq = np.unique(targetFrequency)\n    nFreq = len(possibleFreq) \n    trialsEachFreq = behavioranalysis.find_trials_each_type(targetFrequency,possibleFreq)\n\n    positions=[(0,0), (0,1), (1,0), (1,1)]\n    ax1=plt.subplot2grid((2,2), positions[ind])\n\n    for intensity in np.unique(intensities):\n        nTrialsEachFreq = np.empty(nFreq)\n        nRightwardEachFreq = np.empty(nFreq)\n        for indf,thisFreq in enumerate(possibleFreq):\n            nTrialsEachFreq[indf] = sum(valid & trialsEachFreq[:,indf] & (intensities==intensity))\n            nRightwardEachFreq[indf] = sum(valid & choiceRight & trialsEachFreq[:,indf] & (intensities==intensity))\n\n        fractionRightEachFreq = nRightwardEachFreq/nTrialsEachFreq.astype(float)\n\n        #plot(possibleFreq,fractionRightEachFreq,'-o')\n        #gca().set_xscale('log')\n        ax1.plot(fractionRightEachFreq,'-o', label=intensity)\n        title(subject)\n        legend()\n        ylim([0,1])\nshow()\n\n\n'''\n    fractionRightEachFreq = nRightwardEachFreq/nTrialsEachFreq.astype(float)\n\n    #plot(possibleFreq,fractionRightEachFreq,'-o')\n    #gca().set_xscale('log')\n    positions=[(0,0), (0,1), (1,0), (1,1)]\n    ax1=plt.subplot2grid((2,2), positions[ind])\n    ax1.plot(fractionRightEachFreq,'-o')\n    title(subject)\n    ylim([0,1])\nshow()\n'''\n", "sub_path": "oldjaratest/nick/test018_plot_all_intensities_psycurve.py", "file_name": "test018_plot_all_intensities_psycurve.py", "file_ext": "py", "file_size_in_byte": 1920, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "jaratoolbox.loadbehavior.path_to_behavior_data", "line_number": 12, "usage_type": "call"}, {"api_name": "jaratoolbox.loadbehavior", "line_number": 12, "usage_type": "name"}, {"api_name": "jaratoolbox.loadbehavior.BehaviorData", "line_number": 15, "usage_type": "call"}, {"api_name": "jaratoolbox.loadbehavior", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 25, "usage_type": "call"}, {"api_name": "jaratoolbox.behavioranalysis.find_trials_each_type", "line_number": 27, "usage_type": "call"}, {"api_name": "jaratoolbox.behavioranalysis", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "206416336", "text": "from typing import List\n\nfrom TestCase import TreeNode, null\n\n'''\nGiven an array where the i-th element is the price of a given stock on day i.\n\nDesign an algorithm to compute the maximum profit you can achieve. You may complete at most two transactions.\n\nNote: You may not engage in multiple transactions at the same time (you must sell the stock before you buy again).\n\nExample 1:\n\nInput: [3,3,5,0,0,3,1,4]\nOutput: 6\nExplanation: Buy on day 4 (price = 0) and sell on day 6 (price = 3), profit = 3 - 0 = 3.\n  Then buy on day 7 (price = 1) and sell on day 8 (price = 4), profit = 4 - 1 = 3.\nExample 2:\n\nInput: [1,2,3,4,5]\nOutput: 4\nExplanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5 - 1 = 4.\n  Note that you cannot buy on day 1 and again on day 2 and sell both later,\n  because that counts as engaging in multiple transactions at once; you must sell before buying again.\nExample 3:\n\nInput: [7,6,4,3,1]\nOutput: 0\nExplanation: In this case no transaction is done, so the maximum profit is 0.\n'''\n\n'''\nGeneral solution framework: define two states, 0 and 1, standing for "sold / no position" and "holding".\ndp[i][k][state] is the maximum profit on day i, with at most k sell opportunities, while currently holding (1) or sold (0).\ndp[i][k][0] = max(dp[i-1][k][0], dp[i-1][k][1] + prices[i])   (dp[i-1][k][1] + prices[i] means: held yesterday, sell today)\ndp[i][k][1] = max(dp[i-1][k][1], dp[i-1][k-1][0] - prices[i])   (selling brings income while buying costs the price, which keeps the sold-state profit at least as high as holding, so the final maximum profit is read from the sold state)\nInitialization: dp[0][k][0] = 0, dp[0][k][1] = -infinity, dp[i][0][0] = 0, dp[i][0][1] = -infinity, where -infinity marks states that cannot exist\n\nProblem 3, k = +infinity with cooldown\n\nAfter every sell you must wait one day before trading again. Just fold this constraint into the transition equations of the previous problem:\n\ndp[i][0] = max(dp[i-1][0], dp[i-1][1] + prices[i])\ndp[i][1] = max(dp[i-1][1], dp[i-2][0] - prices[i])\nExplanation: when choosing to buy on day i, transition from the state of day i-2 rather than day i-1.\nTranslated into code:\n\nint maxProfit_with_cool(int[] prices) {\n    int n = prices.length;\n    int dp_i_0 = 0, dp_i_1 = Integer.MIN_VALUE;\n    int dp_pre_0 = 0; // represents dp[i-2][0]\n    for (int i = 0; i < n; i++) {\n        int temp = dp_i_0;\n        dp_i_0 = Math.max(dp_i_0, dp_i_1 + prices[i]);\n        dp_i_1 = Math.max(dp_i_1, dp_pre_0 - prices[i]);\n        dp_pre_0 = temp;\n    }\n    return dp_i_0;\n}\nProblem 4, k = +infinity with fee\n\nEvery transaction pays a fee; simply subtract the fee from the profit. Rewritten equations:\n\ndp[i][0] = max(dp[i-1][0], dp[i-1][1] + prices[i])\ndp[i][1] = max(dp[i-1][1], dp[i-1][0] - prices[i] - fee)\nExplanation: this is equivalent to the purchase price going up.\nSubtracting in the first equation works just as well, equivalent to the selling price going down.\nDirect translation into code:\n\nint maxProfit_with_fee(int[] prices, int fee) {\n    int n = prices.length;\n    int dp_i_0 = 0, dp_i_1 = Integer.MIN_VALUE;\n    for (int i = 0; i < n; i++) {\n        int temp = dp_i_0;\n        dp_i_0 = Math.max(dp_i_0, dp_i_1 + prices[i]);\n        dp_i_1 = Math.max(dp_i_1, temp - prices[i] - fee);\n    }\n    return dp_i_0;\n}\n\nProblem 6, k = any integer\n\nWith the k = 2 case above as groundwork, this one should be no different from its first solution. But it hit a memory-limit error: the incoming k can be very large, making the dp array far too big. So how large can the number of transactions k actually be?\n\nOne transaction consists of a buy and a sell and needs at least two days, so an effective bound on k is n/2; anything larger imposes no constraint and is equivalent to k = +infinity, a case already solved above.\n\nReuse the earlier code directly:\n\nint maxProfit_k_any(int max_k, int[] prices) {\n    int n = prices.length;\n    if (max_k > n / 2) \n        return maxProfit_k_inf(prices);\n\n    int[][][] dp = new int[n][max_k + 1][2];\n    for (int i = 0; i < n; i++) \n        for (int k = max_k; k >= 1; k--) {\n            if (i - 1 == -1) { /* handle the base case */ }\n            dp[i][k][0] = max(dp[i-1][k][0], dp[i-1][k][1] + prices[i]);\n            dp[i][k][1] = max(dp[i-1][k][1], dp[i-1][k-1][0] - prices[i]); \n        }\n    return dp[n - 1][max_k][0];\n}\n\n'''\n\n\nclass Solution:\n    def maxProfit_recur(self, prices: List[int], n=2, start=0) -> int:\n        print(n, start)\n        if n == 0 or start >= len(prices):\n            return 0\n        maxprofit = 0\n\n        if n == 1:\n            minprice = float('inf')\n            for price in prices[start:]:\n                maxprofit = max(price - minprice, maxprofit)\n                minprice = min(price, minprice)\n        else:\n            curV = 0\n            for i in range(start + 1, len(prices)):\n                if prices[i] > prices[i - 1]:\n                    curV += prices[i] - prices[i - 1]\n                else:\n                    if curV > 0:\n                        maxprofit = max(maxprofit, curV + self.maxProfit_recur(prices, n - 1, i))\n                    curV += prices[i] - prices[i - 1]\n                    if curV < 0:\n                        return max(maxprofit, self.maxProfit_recur(prices, n, i))\n            maxprofit = max(maxprofit, curV)\n        return maxprofit\n\n    def maxProfit(self, prices: List[int]) -> int:\n        dp10 = 0\n        dp11 = -float('inf')\n        dp20 = 0\n        dp21 = -float('inf')\n        for p in prices:\n            dp10 = max(dp10, dp11 + p)\n            dp11 = max(dp11, -p)\n            dp20 = max(dp20, dp21 + p)\n            dp21 = max(dp21, dp10 - p)\n        return dp20\n\n\nclass Solution124:\n    '''\n    Given a non-empty binary tree, return its maximum path sum.\n    Here a path is defined as a sequence that starts from any node in the tree and reaches any node. The path contains at least one node and does not have to pass through the root.\n    Example 1:\n\n    Input: [1,2,3]\n\n           1\n          / \\\n         2   3\n\n    Output: 6\n    Example 2:\n\n    Input: [-10,9,20,null,null,15,7]\n\n       -10\n       / \\\n      9  20\n        /  \\\n       15   7\n\n    Output: 42\n    '''\n\n    def maxPathSum_down(self, root: TreeNode) -> int:\n        # Misread the problem: assumed a path must stay on a single branch, which would reduce this to an iterative maximum-subarray problem\n        stack = [[None, -float('inf')]]\n        maxSum = 0\n        while root or len(stack) > 1:\n            if root:\n                
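# Kadane-style: each stack entry carries the best running path sum ending at that node while walking down the left spine\n                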
while root:\n stack.append([root, max(0, root.val, stack[-1][1] + root.val)])\n maxSum = max(maxSum, stack[-1][1])\n root = root.left\n\n else:\n root = stack[-1][0].right\n if not root:\n pre, _ = stack.pop()\n while len(stack) > 1 and stack[-1][0].right == pre:\n pre, _ = stack.pop()\n return maxSum\n\n def maxPathSum(self, root: TreeNode) -> int:\n maxPath = -float('inf')\n\n def maxPathCore(root: TreeNode) -> int:\n if not root:\n return 0\n nonlocal maxPath\n left = maxPathCore(root.left)\n right = maxPathCore(root.right)\n maxPath = max(maxPath, left + right + root.val)\n return max(left + root.val, right + root.val, 0)\n\n maxPathCore(root)\n return maxPath\n\n\nroot = TreeNode.createTree([-10])\nprint(Solution124().maxPathSum(root))\n\n", "sub_path": "leetcode/200/121-124 买卖股票的最佳时机.py", "file_name": "121-124 买卖股票的最佳时机.py", "file_ext": "py", "file_size_in_byte": 7599, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "typing.List", "line_number": 108, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 133, "usage_type": "name"}, {"api_name": "TestCase.TreeNode", "line_number": 172, "usage_type": "name"}, {"api_name": "TestCase.TreeNode", "line_number": 191, "usage_type": "name"}, {"api_name": "TestCase.TreeNode", "line_number": 194, "usage_type": "name"}, {"api_name": "TestCase.TreeNode.createTree", "line_number": 207, "usage_type": "call"}, {"api_name": "TestCase.TreeNode", "line_number": 207, "usage_type": "name"}]} +{"seq_id": "380663527", "text": "#!/Users/pedro/Anaconda3/python\r\n# coding: UTF-8\r\n\r\n# CGI modules imports\r\nimport cgi\r\nimport cgitb\r\n\r\n# Import path link to make connections to all folders\r\nimport path_link\r\n\r\n# Import sql manager and session manager\r\n# import MySQLManager\r\nimport SQLiteManager\r\nimport SessionManager\r\n\r\n# Traceback for error messages\r\nimport os\r\nimport traceback\r\nimport datetime\r\n\r\n__author__ = 'pedro'\r\n\r\n# Enable cgi traceback\r\ncgitb.enable()\r\n\r\n# Set the database connector\r\n# db_connector = MySQLManager.MySQLManager\r\ndb_connector = SQLiteManager.SQLiteManager\r\n\r\n\r\nclass Upload(object):\r\n \"\"\"\r\n Handle the login operations and redirects\r\n \"\"\"\r\n def __init__(self):\r\n \"\"\"\r\n Creates the login object handler\r\n :return:\r\n \"\"\"\r\n # Set the html to be empty\r\n self.html = \"\"\r\n\r\n # Open the session manager\r\n self.session = SessionManager.SessionManager()\r\n\r\n def post(self, form):\r\n \"\"\"\r\n Receive the values from form\r\n :param form: dictionary with post values\r\n :return:\r\n \"\"\"\r\n # Get filename here.\r\n fileitem = form['cFile']\r\n\r\n # Path to the user file folder\r\n user_path = os.path.join(\"..\", \"data\", self.session.get(\"usuario\")[\"usuario_login\"])\r\n\r\n # Verify if a file gas been selected\r\n if fileitem.filename:\r\n # Check if directory has been created\r\n if not os.path.exists(user_path):\r\n # Create directory\r\n os.mkdir(user_path)\r\n\r\n # Remove client directory path\r\n fn = os.path.basename(fileitem.filename)\r\n\r\n # Write file on server\r\n open(os.path.join(user_path, fn), 'wb').write(fileitem.file.read())\r\n\r\n # Criamos a mensagem a ser disponibilizada\r\n self.session.set(\"upload_status\", \"Sucesso\")\r\n self.session.set(\"upload_message\", \"Upload bem sucedido\")\r\n print(\"Location: ../fetch_page.py?page=upload\\r\\n\\r\")\r\n self.session.save()\r\n\r\n # Insert the query for the log\r\n try:\r\n # We set a 
create the database manager object\r\n connector = db_connector(\"127.0.0.1\", \"root\", \"\", \"iz\")\r\n\r\n connector.query_insert(\"log\", (\"log_data\", \"log_evento\", \"log_usuario\", \"log_ip\"),\r\n (datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d'), \"Fez Upload de \" + fn,\r\n self.session.get(\"usuario\")['usuario_login'], os.environ[\"REMOTE_ADDR\"]))\r\n except:\r\n raise Exception(traceback.format_exc())\r\n\r\n else:\r\n self.session.set(\"upload_status\", \"Fracasso\")\r\n self.session.set(\"upload_message\", str(fileitem.filename))\r\n print(\"Location: ../fetch_page.py?page=upload\\r\\n\\r\")\r\n self.session.save()\r\n\r\nif __name__ == '__main__':\r\n form = cgi.FieldStorage()\r\n l = Upload()\r\n print(\"Content-Type: text/html\")\r\n l.post(form)\r\n", "sub_path": "YouTube Ignorancia Zero/Banco de Dados/152 - Bancos de Dados VI - SQLite/Upload.py", "file_name": "Upload.py", "file_ext": "py", "file_size_in_byte": 3035, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "cgitb.enable", "line_number": 24, "usage_type": "call"}, {"api_name": "SQLiteManager.SQLiteManager", "line_number": 28, "usage_type": "attribute"}, {"api_name": "SessionManager.SessionManager", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 83, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 84, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 86, "usage_type": "call"}, {"api_name": "cgi.FieldStorage", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "333441168", "text": "import sqlite3\r\n\r\ndb = sqlite3.connect(database=\"test.db\")\r\ncur = db.cursor()\r\n\r\ncur.execute(\"create table if not exists test (No int NOT NULL, Name varchar2(100), Company varchar2(100), Price int, Quantity int, Discount float, primary key (No))\")\r\nprint(\"table created\")\r\nno, name, company = 1, 'bisuits', 'parleg'\r\nprice, quantity, discount = 10, 10, 10.0\r\nls = (no, name, company, price, quantity, discount)\r\ncur.execute(f\"insert into test values(?, ?, ?, ?, ?, ?)\", ls)\r\nprint(\"row inserted\")\r\n\r\ncur.execute(\"select * from test\")\r\nprint(cur.fetchall())\r\n", "sub_path": "Res/dbtest.py", "file_name": "dbtest.py", "file_ext": "py", "file_size_in_byte": 558, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sqlite3.connect", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "630632776", "text": "import numpy as np\r\nimport open3d as o3d\r\nfrom PIL import Image\r\nimport options\r\nimport utils\r\nimport torch\r\nimport transform\r\n\r\nif 
__name__ == \"__main__\":\r\n cfg = options.get_arguments()\r\n cfg.batchSize = cfg.inputViewN\r\n\r\n model = utils.build_structure_generator(cfg).to(cfg.device)\r\n\r\n print(\"======= IMPORT PRETRAINED MODEL =======\")\r\n\r\n png = Image.open('data/wood/64wood.png')\r\n png.load()\r\n rgb = Image.new(\"RGB\", png.size, (255, 255, 255))\r\n rgb.paste(png, mask=png.split()[3])\r\n\r\n image_data = np.array(rgb, dtype='uint8')\r\n\r\n image_data = image_data / 255.0\r\n\r\n arr24 = np.array([image_data])\r\n\r\n for i in range(23):\r\n arr24 = np.concatenate((arr24, np.array([image_data])))\r\n\r\n input_images = torch.from_numpy(arr24) \\\r\n .permute((0, 3, 1, 2)) \\\r\n .float().to(cfg.device)\r\n\r\n print(\"======= IMPORT IMAGE =======\")\r\n\r\n fuseTrans = cfg.fuseTrans\r\n\r\n points24 = np.zeros([cfg.inputViewN, 1], dtype=np.object)\r\n\r\n XYZ, maskLogit = model(input_images)\r\n mask = (maskLogit > 0).float()\r\n # ------ build transformer ------\r\n XYZid, ML = transform.fuse3D(\r\n cfg, XYZ, maskLogit, fuseTrans) # [B,3,VHW],[B,1,VHW]\r\n\r\n XYZid, ML = XYZid.permute([0, 2, 1]), ML.squeeze()\r\n for a in range(cfg.inputViewN):\r\n xyz = XYZid[a] # [VHW, 3]\r\n ml = ML[a] # [VHW]\r\n points24[a, 0] = (xyz[ml > 0]).detach().cpu().numpy()\r\n\r\n pcd = o3d.geometry.PointCloud()\r\n pcd.points = o3d.utility.Vector3dVector(points24[0,0])\r\n o3d.io.write_point_cloud(f\"results/{cfg.model}_{cfg.experiment}/64wood.ply\", pcd)\r\n\r\n print(\"======= TRANSFORM TO POINT CLOUD =======\")\r\n", "sub_path": "wood_model.py", "file_name": "wood_model.py", "file_ext": "py", "file_size_in_byte": 1671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "options.get_arguments", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.build_structure_generator", "line_number": 13, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 17, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 17, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 19, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.object", "line_number": 39, "usage_type": "attribute"}, {"api_name": "transform.fuse3D", "line_number": 44, "usage_type": "call"}, {"api_name": "open3d.geometry.PointCloud", "line_number": 53, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 53, "usage_type": "attribute"}, {"api_name": "open3d.utility.Vector3dVector", "line_number": 54, "usage_type": "call"}, {"api_name": "open3d.utility", "line_number": 54, "usage_type": "attribute"}, {"api_name": "open3d.io.write_point_cloud", "line_number": 55, "usage_type": "call"}, {"api_name": "open3d.io", "line_number": 55, "usage_type": "attribute"}]} +{"seq_id": "120927532", "text": "# OS: Ubuntu, 18.04.1 LTS\n# Python: Python 2.7.15\n# Mongodb: v3.2.21 \n# Siteng Cai\nimport sys\nimport os.path\nimport argparse\nimport re\nimport itertools\nimport functions\n \ndef is_number(s):\n\ttry:\n\t\tint(s)\n\t\treturn True\n\texcept 
ValueError:\n\t\tpass\n\treturn False\n\ndef seq_read(fp):\n\tline = fp.readline().replace(\" \", \"\").rstrip()\n\tseq = \"\"\n\twhile line != '//':\n\t\tseq += line\n\t\tline = fp.readline().replace(\" \", \"\").rstrip()\n\treturn seq\n\ndef tableGeneration(filepath,ptms):\n\ttable = functions.connectMongoDB('uniprot','table')\n\ttable.drop()\n\tout_id = \"\"\n\tout_ac = []\n\tout_position = []\n\tout_data = dict()\n\tsequence = \"\"\n\ttemp_ptm = \"\"\n\tprev_fp_pos = 0\n\tcheck = []\n\t\n\tfp = open(filepath)\n\tline = fp.readline()\n\t\n\twhile line:\n\t\tcollapsed = ' '.join(line.split())\n\t\tdata = collapsed.split(\";\")\n\t\tinfo = data[0].split(\" \")\n\t\ttag = info[0]\n\t\t#print(info[0]+\" info1 \"+info[1]+\"\\n\")\n\t\tif tag == \"ID\":\n\t\t\tout_id = info[1]\n\t\telif tag == \"AC\":\n\t\t\tout_ac.append(info[1])\n\t\t\tif len(data) > 2:\n\t\t\t\tfor x in range(1, len(data)-1):\n\t\t\t\t\tout_ac.append(data[x].lstrip())\n\t\telif tag == \"OC\":\n\t\t\tcheck.append(info[1].lstrip())\n\t\t\tif len(data) > 2:\n\t\t\t\tfor x in range(1, len(data)-1):\n\t\t\t\t\tcheck.append(data[x].lstrip())\n\t\t\tout_data = {\"_id\" : out_id,\"ac\":out_ac,\"species\":check}\n\t\telif tag == \"FT\":\n\t\t\ttemp_ptm = \"\"\n\t\t\tout_position = functions.remove_duplicates([info[2],info[3]])\n\t\t\ttemp_ptm = \" \".join(info[4:])\n\t\t\t#if \"P0C9J5\" in out_ac:\n\t\t\t#\tprint(\"################temp_ptm is 1 \"+temp_ptm+\"\\n\")\n\t\t\tprev_fp_pos = fp.tell()\n\t\t\tline = ' '.join(fp.readline().split())\n\t\t\tinfo = line.split(\" \")\n\t\t\twhile info[0] == \"FT\":\n\t\t\t\tif len(info) > 3 and is_number(info[2]) and is_number(info[3]):\n\t\t\t\t\t#if \"Q9TT90\" in out_ac:\n\t\t\t\t\t# print(\"###########temp_ptm is 2 \"+temp_ptm+\"\\n\")\n\t\t\t\t\ttemp_ptm = re.sub('(\\.*)\\)',')',temp_ptm)\n\t\t\t\t\t#if \"P0C9J5\" in out_ac:\n\t\t\t\t\t#\tprint(\"################temp_ptm is 2 \"+temp_ptm+\"\\n\")\n\t\t\t\t\tfor doc in ptms:\n\t\t\t\t\t\t#if \"Q9TT90\" in out_ac and doc == 'Glycyllysineisopeptide(Lys-Gly)(interchainwithG-CterinSUMO)':\n\t\t\t\t\t\t#\tprint(doc+\" vs \"+re.sub('[\\.|\\;].*','',temp_ptm)+\"\\n\")\n\t\t\t\t\t\t#if \"P0C9J5\" in out_ac and doc == 'N-linked (GlcNAc) asparagine':\n\t\t\t\t\t\t#\tprint(doc+\" 2vs \"+re.sub('[\\.|\\;].*','',temp_ptm)+\"\\n\")\n\t\t\t\t\t\t\n\t\t\t\t\t\tif doc == re.sub('[\\.|\\;].*','',temp_ptm):\n\t\t\t\t\t\t\t#if \"P0C9J5\" in out_ac:\n\t\t\t\t\t\t\t#\tprint(\"2 yes\\n\"+\"position\"+str(out_position)+\"\\n\")\n\t\t\t\t\t\t\tptms.setdefault(doc, []).append(out_position)\n\t\t\t\t\ttemp_ptm = \"\"\n\t\t\t\t\tout_position = functions.remove_duplicates([info[2],info[3]])\n\t\t\t\t\ttemp_ptm = \" \".join(info[4:])\n\t\t\t\telse:\n\t\t\t\t\ttemp_ptm = temp_ptm + \" \".join(info[1:])\n\t\t\t\t\t#if \"P0C9J5\" in out_ac:\n\t\t\t\t\t# print(\"#################temp_ptm is 3 \"+temp_ptm+\"\\n\")\n\t\t\t\t\t#for i in range(1,len(info)):\n\t\t\t\t\t#\ttemp_ptm += info[i].rstrip()\n\t\t\t\t\t#print(temp_ptm+\"\\n\")\n\t\t\t\t\n\t\t\t\tprev_fp_pos = fp.tell()\n\t\t\t\tline = ' '.join(fp.readline().split())\n\t\t\t\tinfo = line.split(\" \")\n\t\t\ttemp_ptm = re.sub('(\\.*)\\)',')',temp_ptm)\n\t\t\tfor doc in ptms:\n\t\t\t\t#if \"P0C9J5\" in out_ac and doc == 'N-linked (GlcNAc) asparagine':\n\t\t\t\t#\tprint(doc+\" 4vs \"+re.sub('[\\.|\\;].*','',temp_ptm)+\"\\n\")\n\t\t\t\t\n\t\t\t\tif doc == re.sub('[\\.|\\;].*','',temp_ptm):\n\t\t\t\t\t#if \"P0C9J5\" in 
out_ac:\n\t\t\t\t\t#\t\tprint(\"4yes\\n\"+\"position\"+str(out_position)+\"\\n\")\n\t\t\t\t\tptms.setdefault(doc, []).append(out_position)\n\t\t\tptms = dict( [(k,list(itertools.chain.from_iterable(v))) for k,v in ptms.items() if len(v)>0])\n\t\t\tfp.seek(prev_fp_pos)\n\t\telif tag == \"SQ\":\n\t\t\tsequence = seq_read(fp)\n\t\t\tout_data = functions.merge_two_dicts(out_data,ptms)\n\t\t\tout_data['sequence'] = sequence\n\t\t\ttable.save(out_data)\n\t\t\t##rewind\n\t\t\tptms = {'Phosphoserine':[],'Phosphothreonine':[],\n\t\t\t\t'Phosphotyrosine':[],\n\t\t\t\t'N-linked (GlcNAc) asparagine':[],\n\t\t\t\t'O-linked (GlcNAc) serine':[],'O-linked (GlcNAc) threonine':[],\n\t\t\t\t'Glycyl lysine isopeptide (Lys-Gly)(interchain with G-Cter in ubiquitin)':[],\n\t\t\t\t'Glycyl lysine isopeptide (Lys-Gly)(interchain with G-Cter in SUMO)':[],\n\t\t\t\t'N6-acetyllysine':[],\n\t\t\t\t'Omega-N-methylarginine':[],'Dimethylated arginine':[],'Symmetric dimethylarginine':[],'Asymmetric dimethylarginine':[],\n\t\t\t\t'N6-methyllysine':[],'N6,N6-dimethyllysine':[],'N6,N6,N6-trimethyllysine':[],\n\t\t\t\t'Pyrrolidone carboxylic acid':[],\n\t\t\t\t'S-palmitoyl cysteine': [],\n\t\t\t\t'3-hydroxyproline':[],'4-hydroxyproline':[],#Hydroxylation P\n\t\t\t\t'4,5-dihydroxylysine':[], '3,4-dihydroxylysine':[],'5-hydroxylysine':[] #Hydroxylation K\n\t\t\t\t}\n\t\t\tout_data.clear()\n\t\t\tout_ac = []\n\t\t\tout_position = []\n\t\t\tsequence = \"\"\n\t\t\tcheck = []\n\t\t\n\t\tline = fp.readline()\n\t\n\tfp.close()\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-l', default='uniprotData/uniprot.txt',help=\"local filepath,default path can trigger auto download\")\n\tparser.add_argument('-update', type=int, default=0, help=\"update options: check every # months, default to manual(0)\")\n\tparser.add_argument('-download', type=int, default=0, help=\"whether to download uniprotData/uniprot.txt\")\n\targs = parser.parse_args()\n\tfilepath = args.l\n\t\n\tptms = {'Phosphoserine':[],'Phosphothreonine':[],\n\t\t\t'Phosphotyrosine':[],\n\t\t\t'N-linked (GlcNAc) asparagine':[],\n\t\t\t'O-linked (GlcNAc) serine':[],'O-linked (GlcNAc) threonine':[],\n\t\t\t'Glycyl lysine isopeptide (Lys-Gly)(interchain with G-Cter in ubiquitin)':[],\n\t\t\t'Glycyl lysine isopeptide (Lys-Gly)(interchain with G-Cter in SUMO)':[],\n\t\t\t'N6-acetyllysine':[],\n\t\t\t'Omega-N-methylarginine':[],'Dimethylated arginine':[],'Symmetric dimethylarginine':[],'Asymmetric dimethylarginine':[],\n\t\t\t'N6-methyllysine':[],'N6,N6-dimethyllysine':[],'N6,N6,N6-trimethyllysine':[],\n\t\t\t'Pyrrolidone carboxylic acid':[],\n\t\t\t'S-palmitoyl cysteine': [],\n\t\t\t'3-hydroxyproline':[],'4-hydroxyproline':[],#Hydroxylation P\n\t\t\t'4,5-dihydroxylysine':[], '3,4-dihydroxylysine':[],'5-hydroxylysine':[] #Hydroxylation K\n\t\t\t}\n\t\n\tif not os.path.exists(\"uniprotData\"):\n\t\tos.makedirs(\"uniprotData\")\n\t\n\tif args.download >0:\n\t\tif filepath == 'uniprotData/uniprot.txt':\n\t\t\tfunctions.getUniprot()\n\t\n\tif os.path.exists(filepath):\n\t\ttableGeneration(filepath,ptms)\n\t\tif args.update > 0:\n\t\t\ttable_date = functions.rssread()\n\t\t\tfunctions.setAutoUpdate(args.update)\n\t\t\tprint(\"Check for update every %s months!\" % (args.update))\n\t\t\tfunctions.Config_edit(table_date)\n\telse:\n\t\tprint(\"File does not exist\\n\")\n\t\tsys.exit()\n \nif __name__== \"__main__\":\n\tmain()\n\n\n\n", "sub_path": "mongoblast/codes/tableGenerator.py", "file_name": "tableGenerator.py", "file_ext": "py", "file_size_in_byte": 6272, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "functions.connectMongoDB", "line_number": 29, "usage_type": "call"}, {"api_name": "functions.remove_duplicates", "line_number": 64, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 75, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 84, "usage_type": "call"}, {"api_name": "functions.remove_duplicates", "line_number": 89, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 102, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 107, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 111, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 111, "usage_type": "attribute"}, {"api_name": "functions.merge_two_dicts", "line_number": 115, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 166, "usage_type": "name"}, {"api_name": "os.path.makedirs", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "name"}, {"api_name": "functions.getUniprot", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 173, "usage_type": "name"}, {"api_name": "functions.rssread", "line_number": 176, "usage_type": "call"}, {"api_name": "functions.setAutoUpdate", "line_number": 177, "usage_type": "call"}, {"api_name": "functions.Config_edit", "line_number": 179, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 182, "usage_type": "call"}]} +{"seq_id": "158668082", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\n\nfrom absl import app\nfrom absl import flags\nfrom agents.dqn import dqn_agent\n# from agents.ddpg import ddpg_agent\nfrom agents.ddpg import ddpg_agent_s\nfrom agents.implicit_quantile import implicit_quantile_agent\nfrom agents.rainbow import rainbow_agent\nimport time\nimport os\nimport json\nimport numpy as np\nimport csv\n# import run_experiment\nfrom gym import spaces\nimport tensorflow as tf\nfrom pipe import make_pipe, close_pipe, open_write_pipe, open_read_pipe, write_to_pipe, read_from_pipe\n\n# share pipe in every slave\nchannel_name = \"/tmp/channel_in1.pipe\"\nspace_path = \"/tmp/space_out1.pipe\"\ngoal_path = \"/tmp/goal_in1.pipe\"\n\naction_path = \"/tmp/action_in1.pipe\"\nobs_path = \"/tmp/obs_out1.pipe\"\ntouch_path = \"/tmp/touch_out1.pipe\"\nreward_path = \"/tmp/reward_out1.pipe\"\nover_path = \"/tmp/over_out1.pipe\"\nterminal_path = \"/tmp/term_out1.pipe\"\nreset_path = \"/tmp/reset_in1.pipe\"\nwrite_name_list = [action_path, reset_path]\nread_name_list = [obs_path, touch_path, reward_path, over_path, terminal_path]\n\nchannel_pipe = open_write_pipe(channel_name)\nwrite_to_pipe(channel_pipe, 0)\ncomplete_pipe = open_write_pipe(\"/tmp/complete.pipe\")\nwrite_to_pipe(complete_pipe, 0)\ngoal_pipe = open_write_pipe(goal_path)\n\nagent_name = 'ddpg'\ndebug_mode = False\n\ndef create_agent(sess, summary_writer=None):\n\n # s = os.open(space_path, os.O_RDONLY)\n s = open_read_pipe(space_path)\n # space = 
json.loads(os.read(s,1024).decode())\n space = read_from_pipe(s)\n close_pipe([channel_pipe, complete_pipe])\n if not debug_mode:\n summary_writer = None\n if agent_name == 'ddpg':\n os.close(s)\n return ddpg_agent_s.DDPGAgent(sess, action_space=spaces.Box(space[0], space[1], shape=space[2], dtype=np.float32), #num_actions=space,\n summary_writer=summary_writer)\n elif agent_name == 'dqn':\n os.close(s)\n return dqn_agent.DQNAgent(sess, num_actions=space,\n summary_writer=summary_writer)\n elif agent_name == 'rainbow':\n os.close(s)\n return rainbow_agent.RainbowAgent(\n sess, num_actions=space,\n summary_writer=summary_writer)\n elif agent_name == 'implicit_quantile':\n os.close(s)\n return implicit_quantile_agent.ImplicitQuantileAgent(\n sess, num_actions=space,\n summary_writer=summary_writer)\n else:\n os.close(s)\n raise ValueError('Unknown agent: {}'.format(agent_name))\n\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)\nwith tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n \n agent = create_agent(sess)\n agent.eval_mode = True\n # length = round(np.random.uniform(0.5, 0.7), 4)\n # theta = round(np.random.uniform(0, np.pi/2), 4)\n length = np.linspace(0.53, 0.7, 5)\n theta = np.linspace(0.3, np.pi/2, 20)\n statistic = []\n # filename = 'statistic/DDPG_mlit_4s_v0508_3w30_rndinit.csv'\n # outfile = open(filename, 'w', newline='')\n # Run every checkpoint\n for i in range(10):\n filename = 'statistic/test_wait/test_wait%d.csv'%i\n outfile = open(filename, 'w', newline='')\n\n agent._saver.restore(sess, \"model/test_wait/checkpoints/tf_ckpt-%d\"%i)\n ckpt0 = []\n ckpt1 = []\n ckpt2 = []\n print('iterations %d, traning step %d'%(i,(i+1)*30000))\n # Run 100 episodes\n for j in range(100):\n l = int(j/20)\n t = j%20\n\n total_reward = 0\n is_terminal = False\n channel_pipe = open_write_pipe(channel_name)\n write_to_pipe(channel_pipe, 0)\n complete_pipe = open_write_pipe(\"/tmp/complete.pipe\")\n write_to_pipe(complete_pipe, 1)\n\n write_to_pipe(goal_pipe, [round(length[l],4), round(theta[t], 4)])\n\n action_pipe, reset_pipe = open_write_pipe(write_name_list)\n obs_pipe, touch_pipe, reward_pipe, over_pipe, terminal_pipe = open_read_pipe(read_name_list)\n \"\"\"initial_observation_list = read_from_pipe(obs_pipe)\"\"\"\n \"\"\"initial_observation = np.asarray(initial_observation_list)\"\"\"\n initial_observation = np.zeros([100,100,3])\n initial_state_list = read_from_pipe(touch_pipe)\n initial_state = np.asarray(initial_state_list)\n action = agent.begin_episode(initial_observation, initial_state)\n time.sleep(0.032)\n print('episodes %d'%j)\n episode_distance = []\n cnt1 = 0\n cnt2 = 0\n step_cnt = 0\n while 1:\n action = action.tolist()\n write_to_pipe(action_pipe, action)\n \n state = read_from_pipe(touch_pipe)\n state = np.asarray(state)\n reward = read_from_pipe(reward_pipe)\n # reward = np.clip(reward, -1, 1)\n is_terminal = read_from_pipe(terminal_pipe)\n # print('distance', reward)\n episode_distance.append(reward)\n if reward < 0.02:\n cnt1 = 1\n cnt2 += 1\n if cnt2 == 20:\n is_terminal = True\n else:\n cnt2 = 0\n\n step_cnt += 1\n if step_cnt == 400:\n is_terminal = True\n\n \"\"\"observation = read_from_pipe(obs_pipe)\n observation = np.asarray(observation)\"\"\"\n observation = np.zeros([100,100,3])\n over = read_from_pipe(over_pipe)\n over = over or is_terminal\n # os.write(reset_p, json.dumps(is_terminal).encode())\n write_to_pipe(reset_pipe, over)\n if over:\n close_pipe([obs_pipe, touch_pipe, reward_pipe, over_pipe, terminal_pipe, 
action_pipe, reset_pipe, channel_pipe, complete_pipe])\n # print(\"broken\")\n time.sleep(0.032)\n break\n else:\n action = agent.step(reward, observation, state)\n \n if np.mean(episode_distance[-20:]) < 0.01:\n ckpt0.append(1)\n elif np.mean(episode_distance[-20:]) < 0.02:\n ckpt0.append(2)\n else:\n ckpt0.append(0)\n # if cnt == 20:\n if cnt1:\n ckpt1.append(1)\n else:\n ckpt1.append(0)\n \n if cnt2 == 20:\n ckpt2.append(1)\n else:\n ckpt2.append(0)\n succ_rate = np.sum(np.array(ckpt0)>0)/len(ckpt0)\n ckpt0.insert(0, succ_rate)\n\n succ_rate = np.sum(np.array(ckpt1)>0)/len(ckpt1)\n ckpt1.insert(0, succ_rate)\n\n succ_rate = np.sum(np.array(ckpt2)>0)/len(ckpt2)\n ckpt2.insert(0, succ_rate)\n\n writer = csv.writer(outfile)\n writer.writerow(ckpt0)\n writer.writerow(ckpt1)\n writer.writerow(ckpt2)\n outfile.close()", "sub_path": "webots_dopamine/Dopamine_Webots/controllers/endpoint2D/endpoint_evl.py", "file_name": "endpoint_evl.py", "file_ext": "py", "file_size_in_byte": 7033, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pipe.open_write_pipe", "line_number": 39, "usage_type": "call"}, {"api_name": "pipe.write_to_pipe", "line_number": 40, "usage_type": "call"}, {"api_name": "pipe.open_write_pipe", "line_number": 41, "usage_type": "call"}, {"api_name": "pipe.write_to_pipe", "line_number": 42, "usage_type": "call"}, {"api_name": "pipe.open_write_pipe", "line_number": 43, "usage_type": "call"}, {"api_name": "pipe.open_read_pipe", "line_number": 51, "usage_type": "call"}, {"api_name": "pipe.read_from_pipe", "line_number": 53, "usage_type": "call"}, {"api_name": "pipe.close_pipe", "line_number": 54, "usage_type": "call"}, {"api_name": "os.close", "line_number": 58, "usage_type": "call"}, {"api_name": "agents.ddpg.ddpg_agent_s.DDPGAgent", "line_number": 59, "usage_type": "call"}, {"api_name": "agents.ddpg.ddpg_agent_s", "line_number": 59, "usage_type": "name"}, {"api_name": "gym.spaces.Box", "line_number": 59, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.close", "line_number": 62, "usage_type": "call"}, {"api_name": "agents.dqn.dqn_agent.DQNAgent", "line_number": 63, "usage_type": "call"}, {"api_name": "agents.dqn.dqn_agent", "line_number": 63, "usage_type": "name"}, {"api_name": "os.close", "line_number": 66, "usage_type": "call"}, {"api_name": "agents.rainbow.rainbow_agent.RainbowAgent", "line_number": 67, "usage_type": "call"}, {"api_name": "agents.rainbow.rainbow_agent", "line_number": 67, "usage_type": "name"}, {"api_name": "os.close", "line_number": 71, "usage_type": "call"}, {"api_name": "agents.implicit_quantile.implicit_quantile_agent.ImplicitQuantileAgent", "line_number": 72, "usage_type": "call"}, {"api_name": "agents.implicit_quantile.implicit_quantile_agent", "line_number": 72, "usage_type": "name"}, {"api_name": "os.close", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.GPUOptions", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pipe.open_write_pipe", "line_number": 
108, "usage_type": "call"}, {"api_name": "pipe.write_to_pipe", "line_number": 109, "usage_type": "call"}, {"api_name": "pipe.open_write_pipe", "line_number": 110, "usage_type": "call"}, {"api_name": "pipe.write_to_pipe", "line_number": 111, "usage_type": "call"}, {"api_name": "pipe.write_to_pipe", "line_number": 113, "usage_type": "call"}, {"api_name": "pipe.open_write_pipe", "line_number": 115, "usage_type": "call"}, {"api_name": "pipe.open_read_pipe", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 119, "usage_type": "call"}, {"api_name": "pipe.read_from_pipe", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 121, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 123, "usage_type": "call"}, {"api_name": "pipe.write_to_pipe", "line_number": 131, "usage_type": "call"}, {"api_name": "pipe.read_from_pipe", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 134, "usage_type": "call"}, {"api_name": "pipe.read_from_pipe", "line_number": 135, "usage_type": "call"}, {"api_name": "pipe.read_from_pipe", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 154, "usage_type": "call"}, {"api_name": "pipe.read_from_pipe", "line_number": 155, "usage_type": "call"}, {"api_name": "pipe.write_to_pipe", "line_number": 158, "usage_type": "call"}, {"api_name": "pipe.close_pipe", "line_number": 160, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 189, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 192, "usage_type": "call"}]} +{"seq_id": "647142002", "text": "import cv2\n\nI = cv2.imread('lena.png', 0)\n\nI_gauss = cv2.GaussianBlur(I, (3, 3), 1)\nI_sobel = cv2.Sobel(I,cv2.CV_64F,1,0,ksize=5)\nI_laplacian = cv2.Laplacian(I, cv2.CV_64F)\nI_median = cv2.medianBlur(I, 5)\n\ncv2.imshow(\"Gauss\", I_gauss)\ncv2.imshow(\"Sobel\", I_sobel)\ncv2.imshow(\"Laplacian\", I_laplacian)\ncv2.imshow(\"Median\", I_median)\ncv2.waitKey(0)", "sub_path": "lab1_intro/zad8.py", "file_name": "zad8.py", "file_ext": "py", "file_size_in_byte": 346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "cv2.imread", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.Sobel", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.Laplacian", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.medianBlur", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 13, 
"usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "572013871", "text": "from django.db.models.deletion import CASCADE\r\nfrom django.shortcuts import render\r\nfrom django.http import HttpResponse\r\nfrom rest_framework.response import Response\r\nfrom django.shortcuts import get_object_or_404, render\r\nfrom rest_framework.decorators import api_view\r\nfrom django.http import Http404\r\nfrom django.urls import reverse\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\nfrom django.template import loader\r\nfrom django.views import generic\r\nfrom django.utils import timezone\r\nfrom rest_framework.viewsets import ModelViewSet\r\nfrom .models import Seller, Category, Product\r\n\r\nfrom .serializers import SellerSerializer, ProductSerializer, CategorySerializer\r\n\r\n\r\nclass ProductList:\r\n @api_view(['GET'])\r\n def apiOverview(request):\r\n api_urls = {\r\n 'List': '/product-list/',\r\n 'Details View': '/product-detail/',\r\n 'Create': '/product-create/',\r\n 'Update': 'product-update/',\r\n 'Delete': '/product-detail/ ',\r\n }\r\n\r\n return Response(api_urls)\r\n\r\n @api_view(['GET'])\r\n def showAll(request):\r\n products = Product.objects.all()\r\n serializer = ProductSerializer(products, many=True)\r\n return Response(serializer.data)\r\n\r\n @api_view(['GET'])\r\n def viewProduct(request, pk):\r\n product = Product.objects.get(id=pk)\r\n serializer = ProductSerializer(product, many=False)\r\n return Response(serializer.data)\r\n\r\n @api_view(['POST'])\r\n def createProduct(request):\r\n serializer = ProductSerializer(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data)\r\n\r\n @api_view(['POST'])\r\n def updateProduct(request, pk):\r\n product = Product.objects.get(id=pk)\r\n serializer = ProductSerializer(instance=product, data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data)\r\n\r\n @api_view(['GET'])\r\n def deleteProduct(request, pk):\r\n product = Product.objects.get(id=pk)\r\n product.delete()\r\n\r\n return Response('Items deleted successfully')\r\n\r\n\r\nclass CategoryList(ModelViewSet):\r\n serializer_class = CategorySerializer\r\n queryset = Category.objects.all()\r\n\r\n @api_view(['GET'])\r\n def apiOverview(request):\r\n api_urls = {\r\n 'List': '/category-list/',\r\n 'Details View': '/category-detail/',\r\n 'Create': '/category-create/',\r\n 'Update': 'category-update/',\r\n 'Delete': '/category-detail/ ',\r\n }\r\n\r\n return Response(api_urls)\r\n\r\n @api_view(['GET'])\r\n def showAll(request):\r\n categories = Category.objects.all()\r\n serializer = CategorySerializer(categories, many=True)\r\n return Response(serializer.data)\r\n\r\n @api_view(['GET'])\r\n def viewCategory(request, pk):\r\n category = Category.objects.get(id=pk)\r\n serializer = CategorySerializer(category, many=False)\r\n return Response(serializer.data)\r\n\r\n @api_view(['POST'])\r\n def createCategory(request):\r\n serializer = CategorySerializer(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data)\r\n\r\n @api_view(['POST'])\r\n def updateCategory(request, pk):\r\n category = Category.objects.get(id=pk)\r\n serializer = CategorySerializer(instance=category, data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data)\r\n\r\n @api_view(['GET'])\r\n def deleteCategory(request, pk):\r\n category = 
Category.objects.get(id=pk)\r\n category.delete()\r\n\r\n return Response('Items deleted successfully')\r\n\r\n\r\nclass SellerList(ModelViewSet):\r\n @api_view(['GET'])\r\n def apiOverview(request):\r\n api_urls = {\r\n 'List': '/seller-list/',\r\n 'Details View': '/sellerdetail/',\r\n 'Create': '/seller-create/',\r\n 'Update': 'seller-update/',\r\n 'Delete': '/seller-detail/ ',\r\n }\r\n\r\n return Response(api_urls)\r\n\r\n @api_view(['GET'])\r\n def showAll(request):\r\n seller = Seller.objects.all()\r\n serializer = CategorySerializer(seller, many=True)\r\n return Response(serializer.data)\r\n\r\n @api_view(['GET'])\r\n def viewSeller(request, pk):\r\n seller = Seller.objects.get(id=pk)\r\n serializer = CategorySerializer(seller, many=False)\r\n return Response(serializer.data)\r\n\r\n\r\n @api_view(['POST'])\r\n def createSeller(request):\r\n serializer = SellerSerializer(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data)\r\n\r\n\r\n @api_view(['POST'])\r\n def updateSeller(request, pk):\r\n seller = Seller.objects.get(id=pk)\r\n serializer = SellerSerializer(instance=seller, data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data)\r\n\r\n @api_view(['GET'])\r\n def deleteSeller(request, pk):\r\n seller = Category.objects.get(id=pk)\r\n seller.delete()\r\n\r\n return Response('Items deleted successfully')\r\n\r\n\r\n", "sub_path": "mysite/myapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5357, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "rest_framework.response.Response", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Product.objects.all", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 34, "usage_type": "name"}, {"api_name": "serializers.ProductSerializer", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Product.objects.get", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 40, "usage_type": "name"}, {"api_name": "serializers.ProductSerializer", "line_number": 41, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 42, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 38, "usage_type": "call"}, {"api_name": "serializers.ProductSerializer", "line_number": 46, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 50, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 44, "usage_type": "call"}, {"api_name": "models.Product.objects.get", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 54, "usage_type": "name"}, {"api_name": "serializers.ProductSerializer", "line_number": 55, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", 
"line_number": 59, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Product.objects.get", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 63, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 66, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 61, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 69, "usage_type": "name"}, {"api_name": "serializers.CategorySerializer", "line_number": 70, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 71, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 71, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 83, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Category.objects.all", "line_number": 87, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 87, "usage_type": "name"}, {"api_name": "serializers.CategorySerializer", "line_number": 88, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 89, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 85, "usage_type": "call"}, {"api_name": "models.Category.objects.get", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 93, "usage_type": "name"}, {"api_name": "serializers.CategorySerializer", "line_number": 94, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 95, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 91, "usage_type": "call"}, {"api_name": "serializers.CategorySerializer", "line_number": 99, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 103, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 97, "usage_type": "call"}, {"api_name": "models.Category.objects.get", "line_number": 107, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 107, "usage_type": "name"}, {"api_name": "serializers.CategorySerializer", "line_number": 108, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 112, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 105, "usage_type": "call"}, {"api_name": "models.Category.objects.get", "line_number": 116, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 116, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 119, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 114, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 122, "usage_type": "name"}, 
{"api_name": "rest_framework.response.Response", "line_number": 133, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 123, "usage_type": "call"}, {"api_name": "models.Seller.objects.all", "line_number": 137, "usage_type": "call"}, {"api_name": "models.Seller.objects", "line_number": 137, "usage_type": "attribute"}, {"api_name": "models.Seller", "line_number": 137, "usage_type": "name"}, {"api_name": "serializers.CategorySerializer", "line_number": 138, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 139, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 135, "usage_type": "call"}, {"api_name": "models.Seller.objects.get", "line_number": 143, "usage_type": "call"}, {"api_name": "models.Seller.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "models.Seller", "line_number": 143, "usage_type": "name"}, {"api_name": "serializers.CategorySerializer", "line_number": 144, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 145, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 141, "usage_type": "call"}, {"api_name": "serializers.SellerSerializer", "line_number": 150, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 154, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 148, "usage_type": "call"}, {"api_name": "models.Seller.objects.get", "line_number": 159, "usage_type": "call"}, {"api_name": "models.Seller.objects", "line_number": 159, "usage_type": "attribute"}, {"api_name": "models.Seller", "line_number": 159, "usage_type": "name"}, {"api_name": "serializers.SellerSerializer", "line_number": 160, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 164, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 157, "usage_type": "call"}, {"api_name": "models.Category.objects.get", "line_number": 168, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 168, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 168, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 171, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 166, "usage_type": "call"}]} +{"seq_id": "174565343", "text": "import pandas as pd\nimport json\n\nclass OpenResultFile:\n def __init__(self):\n data = self.open_file('./pickles/files/sitemap_01903.json')\n self.print_data(data)\n\n @staticmethod\n def open_file(path):\n with open(path) as f:\n data = json.load(f)\n f.close()\n mapping = data.get('mapping')\n return [{'url': entry.get('url'), 'df': pd.read_json(entry.get('df'))} for entry in mapping]\n\n def print_data(self, result_array,):\n for entry in result_array:\n print(entry.get('url'))\n print(entry.get('df').to_string())\n\n\nif __name__ == \"__main__\":\n OpenResultFile()", "sub_path": "open_result_file.py", "file_name": "open_result_file.py", "file_ext": "py", "file_size_in_byte": 649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "284388212", "text": "from mesa.datacollection import DataCollector\nfrom mesa import Model\nfrom 
mesa.time import RandomActivation\nfrom mesa_geo.geoagent import GeoAgent, AgentCreator\nfrom mesa_geo import GeoSpace\nimport random\nfrom agent import *\nimport numpy as np\nimport pprint\n\nclass RegionModel(Model):\n def __init__(self,\n # basic_trade_reward, member_trade_reward,\n international_trade, max_eff, eutax, neighbor_influence,\n tax_influence, member_trade_multiplier, randomness, eu_strategy):\n\n # set up parameters\n # self.basic_trade_reward = basic_trade_reward\n # self.member_trade_reward = member_trade_reward\n self.international_trade = international_trade\n self.max_eff = max_eff\n self.eutax = eutax\n self.neighbor_influence = neighbor_influence\n self.tax_influence = tax_influence\n self.member_trade_multiplier = member_trade_multiplier\n self.randomness = randomness\n self.eu_strategy = eu_strategy\n\n # initialise other attributes\n self.member_count = 0\n self.other_count = 0\n self.treasury = 0\n self.total_wealth = 0\n self.member_wealth = 0\n self.other_wealth = 0\n self.total_eff = 0\n self.member_eff = 0\n self.other_eff = 0\n self.round = 0\n self.schedule = RandomActivation(self)\n self.grid = GeoSpace()\n self.running = True\n\n # set up grid\n AC = AgentCreator(RegionAgent, {\"model\": self})\n self.agents = AC.from_file(\"nuts_rg_60M_2013_lvl_2.geojson\")\n self.grid.add_agents(self.agents)\n\n # set up agents\n for agent in self.agents:\n self.schedule.add(agent)\n cooperativeness = random.uniform(-1, 1)\n agent.cooperativeness = cooperativeness\n agent.strategy = 1 if cooperativeness > 0 else 2\n agent.wealth = 1\n agent.efficiency = max(random.random() * self.max_eff, 0.0000001)\n # agent.efficiency = random.uniform(1, self.max_eff * 2)\n # agent.efficiency = random.uniform()\n # agent.efficiency = agent.SHAPE_AREA * max_eff\n agent.tax = 0\n agent.trade_bonus = 0\n # agent.trade_bonus = 0\n \n # set up datacollector\n self.datacollector = DataCollector({\n \"member_count\": \"member_count\",\n \"other_count\":\"other_count\",\n \"average_cooperativeness\":\"average_cooperativeness\",\n \"other_wealth\":\"other_wealth\",\n \"total_wealth\":\"total_wealth\",\n \"member_wealth\":\"member_wealth\",\n \"other_eff\":\"other_eff\",\n \"total_eff\":\"total_eff\",\n \"member_eff\":\"member_eff\"\n })\n self.datacollector.collect(self)\n\n\n\n def compute_statistics(self):\n # only used for datacollector\n\n self.member_count = 0\n self.other_count = 0\n\n self.member_wealth = 0\n self.other_wealth = 0\n self.total_wealth = 0\n\n self.member_eff = 0\n self.other_eff = 0\n self.total_eff = 0\n\n total_cooperativeness = 0\n\n for agent in self.agents:\n if agent.strategy == 1:\n self.member_wealth += agent.wealth\n self.member_eff += agent.efficiency\n self.member_count += 1\n else:\n self.other_wealth += agent.wealth\n self.other_eff += agent.efficiency\n self.other_count += 1\n total_cooperativeness += agent.cooperativeness\n \n self.average_cooperativeness = total_cooperativeness / 320\n\n self.total_wealth = self.member_wealth + self.other_wealth\n self.member_wealth = self.member_wealth / max(self.member_count, 1)\n self.other_wealth = self.other_wealth / max(self.other_count, 1)\n self.total_wealth = self.total_wealth / 320\n\n self.total_eff = self.member_eff + self.other_eff\n self.member_eff = self.member_eff / max(self.member_count, 1)\n self.other_eff = self.other_eff / max(self.other_count, 1)\n self.total_eff = self.total_eff / 320\n\n\n\n def collect_taxes(self):\n members = [agent for agent in self.agents if agent.strategy == 1]\n if not 
members:\n self.running = False\n return\n for agent in members:\n tax = agent.wealth * self.eutax\n agent.tax_payed = tax\n agent.wealth -= tax\n self.treasury += tax\n\n\n\n def distribute_benefits(self):\n members = [agent for agent in self.agents if agent.strategy == 1]\n if not members:\n self.running = False\n return\n\n if self.eu_strategy == \"default\":\n benefit = self.treasury / len(members)\n for agent in members:\n agent.wealth += benefit\n if benefit + agent.trade_bonus > agent.tax_payed:\n agent.cooperativeness = min(agent.cooperativeness + self.tax_influence, 1)\n elif benefit + agent.trade_bonus < agent.tax_payed:\n agent.cooperativeness = max(agent.cooperativeness - self.tax_influence, -1)\n\n elif self.eu_strategy == \"hardship\":\n total_hardship = 0\n for agent in members:\n total_hardship += 1 - agent.cooperativeness\n for agent in members:\n agent_benefit = ((1 - agent.cooperativeness) / total_hardship) * self.treasury\n agent.wealth += agent_benefit\n if agent_benefit + agent.trade_bonus > agent.tax_payed:\n agent.cooperativeness = min(agent.cooperativeness + self.tax_influence, 1)\n elif agent_benefit + agent.trade_bonus < agent.tax_payed:\n agent.cooperativeness = max(agent.cooperativeness - self.tax_influence, -1)\n \n else:\n # unknown strategy\n raise NotImplementedError()\n\n self.treasury = 0\n\n\n\n def compute_virtual_benefits(self):\n others = [agent for agent in self.agents if agent.strategy == 2]\n members = [agent for agent in self.agents if agent.strategy == 1]\n\n if not members or not others:\n self.running = False\n return\n\n if self.eu_strategy == \"default\":\n for agent in others:\n virtual_tax_payed = agent.wealth * self.eutax\n virtual_treasury = self.treasury + virtual_tax_payed\n virtual_benefit = virtual_treasury / (len(members) + 1)\n if virtual_benefit + agent.trade_bonus > virtual_tax_payed:\n agent.cooperativeness = min(agent.cooperativeness + self.tax_influence, 1)\n elif virtual_benefit + agent.trade_bonus < virtual_tax_payed:\n agent.cooperativeness = max(agent.cooperativeness - self.tax_influence, -1)\n\n elif self.eu_strategy == \"hardship\":\n total_hardship = 0\n for agent in members:\n total_hardship += 1 - agent.cooperativeness\n for agent in others:\n virtual_tax_payed = agent.wealth * self.eutax\n virtual_treasury = self.treasury + virtual_tax_payed\n virtual_benefit = (1 / (total_hardship + 1)) * virtual_treasury\n # hardship of every defector is = max hardship = 1\n if virtual_benefit + agent.trade_bonus > virtual_tax_payed:\n agent.cooperativeness = min(agent.cooperativeness + self.tax_influence, 1)\n elif virtual_benefit + agent.trade_bonus < virtual_tax_payed:\n agent.cooperativeness = max(agent.cooperativeness - self.tax_influence, -1)\n \n else:\n # unknown strategy\n raise NotImplementedError()\n\n\n\n def step(self):\n for agent in self.agents: agent.has_traded = False\n self.round += 1\n\n self.schedule.step()\n \n self.compute_statistics()\n\n self.collect_taxes()\n self.compute_virtual_benefits()\n self.distribute_benefits()\n\n self.datacollector.collect(self)", "sub_path": "model/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 8154, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "mesa.Model", "line_number": 11, "usage_type": "name"}, {"api_name": "mesa.time.RandomActivation", "line_number": 40, "usage_type": "call"}, {"api_name": "mesa_geo.GeoSpace", "line_number": 41, "usage_type": "call"}, {"api_name": 
"mesa_geo.geoagent.AgentCreator", "line_number": 45, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 52, "usage_type": "call"}, {"api_name": "agent.cooperativeness", "line_number": 53, "usage_type": "attribute"}, {"api_name": "agent.strategy", "line_number": 54, "usage_type": "attribute"}, {"api_name": "agent.wealth", "line_number": 55, "usage_type": "attribute"}, {"api_name": "agent.efficiency", "line_number": 56, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 56, "usage_type": "call"}, {"api_name": "agent.tax", "line_number": 60, "usage_type": "attribute"}, {"api_name": "agent.trade_bonus", "line_number": 61, "usage_type": "attribute"}, {"api_name": "mesa.datacollection.DataCollector", "line_number": 65, "usage_type": "call"}, {"api_name": "agent.strategy", "line_number": 97, "usage_type": "attribute"}, {"api_name": "agent.wealth", "line_number": 98, "usage_type": "attribute"}, {"api_name": "agent.efficiency", "line_number": 99, "usage_type": "attribute"}, {"api_name": "agent.wealth", "line_number": 102, "usage_type": "attribute"}, {"api_name": "agent.efficiency", "line_number": 103, "usage_type": "attribute"}, {"api_name": "agent.cooperativeness", "line_number": 105, "usage_type": "attribute"}, {"api_name": "agent.strategy", "line_number": 122, "usage_type": "attribute"}, {"api_name": "agent.wealth", "line_number": 127, "usage_type": "attribute"}, {"api_name": "agent.tax_payed", "line_number": 128, "usage_type": "attribute"}, {"api_name": "agent.wealth", "line_number": 129, "usage_type": "attribute"}, {"api_name": "agent.strategy", "line_number": 135, "usage_type": "attribute"}, {"api_name": "agent.wealth", "line_number": 143, "usage_type": "attribute"}, {"api_name": "agent.trade_bonus", "line_number": 144, "usage_type": "attribute"}, {"api_name": "agent.tax_payed", "line_number": 144, "usage_type": "attribute"}, {"api_name": "agent.cooperativeness", "line_number": 145, "usage_type": "attribute"}, {"api_name": "agent.trade_bonus", "line_number": 146, "usage_type": "attribute"}, {"api_name": "agent.tax_payed", "line_number": 146, "usage_type": "attribute"}, {"api_name": "agent.cooperativeness", "line_number": 147, "usage_type": "attribute"}, {"api_name": "agent.cooperativeness", "line_number": 152, "usage_type": "attribute"}, {"api_name": "agent.cooperativeness", "line_number": 154, "usage_type": "attribute"}, {"api_name": "agent.wealth", "line_number": 155, "usage_type": "attribute"}, {"api_name": "agent.trade_bonus", "line_number": 156, "usage_type": "attribute"}, {"api_name": "agent.tax_payed", "line_number": 156, "usage_type": "attribute"}, {"api_name": "agent.cooperativeness", "line_number": 157, "usage_type": "attribute"}, {"api_name": "agent.trade_bonus", "line_number": 158, "usage_type": "attribute"}, {"api_name": "agent.tax_payed", "line_number": 158, "usage_type": "attribute"}, {"api_name": "agent.cooperativeness", "line_number": 159, "usage_type": "attribute"}, {"api_name": "agent.strategy", "line_number": 170, "usage_type": "attribute"}, {"api_name": "agent.strategy", "line_number": 171, "usage_type": "attribute"}, {"api_name": "agent.wealth", "line_number": 179, "usage_type": "attribute"}, {"api_name": "agent.trade_bonus", "line_number": 182, "usage_type": "attribute"}, {"api_name": "agent.cooperativeness", "line_number": 183, "usage_type": "attribute"}, {"api_name": "agent.trade_bonus", "line_number": 184, "usage_type": "attribute"}, {"api_name": "agent.cooperativeness", "line_number": 185, "usage_type": "attribute"}, 
{"api_name": "agent.cooperativeness", "line_number": 190, "usage_type": "attribute"}, {"api_name": "agent.wealth", "line_number": 192, "usage_type": "attribute"}, {"api_name": "agent.trade_bonus", "line_number": 196, "usage_type": "attribute"}, {"api_name": "agent.cooperativeness", "line_number": 197, "usage_type": "attribute"}, {"api_name": "agent.trade_bonus", "line_number": 198, "usage_type": "attribute"}, {"api_name": "agent.cooperativeness", "line_number": 199, "usage_type": "attribute"}, {"api_name": "agent.has_traded", "line_number": 208, "usage_type": "attribute"}]} +{"seq_id": "641251164", "text": "# -*- coding: utf-8 -*-\nimport os\n\nimport unittest\nimport settings\nfrom time import sleep\nfrom BasicMethods import Page, Component, AuthPage\n\nfrom selenium.webdriver import DesiredCapabilities, Remote\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\nclass GroupMessagesPage(Page):\n BASE_URL = 'https://ok.ru/messages/c68009778785062'\n\n @property\n def messages_menu(self):\n return MessagesMenu(self.driver)\n\n\nclass MessagesMenu(Component):\n ATTACH_BUTTON_TRIG = '//span[@class=\"comments_attach_trigger\"]'\n ATTACH_AUDIO_MSG_BUTTON = '//span[@class=\"comments_attach_trigger\"]/div[2]/div/div/ul/li[1]'\n AUDIO_MSG_POPUP = '//object[@class=\"vchat_flash_app\"]'\n PLAY_BUTTON = '//div[@class=\"msg_audio\"]/div[@class=\"msg_audio_play\"]/'#/div[last()]/div[@class=\"msg_cnt\"]'#/div[]/div[@class=\"js-msg-attach\"]/div/div'\n def get_button_attach(self):\n WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(self.ATTACH_BUTTON_TRIG)\n )\n self.driver.find_element_by_xpath(self.ATTACH_BUTTON_TRIG).click()\n\n def get_button_videomessage(self):\n WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(self.ATTACH_AUDIO_MSG_BUTTON)\n )\n self.driver.find_element_by_xpath(self.ATTACH_AUDIO_MSG_BUTTON).click()\n\n def get_videomessage_popup(self):\n return WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(self.AUDIO_MSG_POPUP)\n )\n\nclass AudioMessagesTest(unittest.TestCase):\n USERS_COUNT = u'3 участника'\n\n def setUp(self):\n browser = os.environ.get('BROWSER', 'FIREFOX')\n\n self.driver = Remote(\n command_executor='http://127.0.0.1:4444/wd/hub',\n desired_capabilities=getattr(DesiredCapabilities, browser).copy()\n )\n\n auth_page = AuthPage(self.driver) # Auth here\n auth_page.authorize()\n\n self.message_page = GroupMessagesPage(self.driver)\n\n def tearDown(self):\n self.driver.quit()\n\n def test_audiomessage_window_opens(self):\n self.message_page.open()\n self.message_page.messages_menu.get_button_attach()\n\n self.message_page.messages_menu.get_button_videomessage()\n\n audio_msg_popup = self.message_page.messages_menu.get_videomessage_popup()\n #проверяем выскочил ли object с флешом\n self.assertIsNotNone(audio_msg_popup)\n\n def test_audiomessage_play_and_stop_message(self):\n PLAY_STOP_BUTTON = '//div[@class=\"msg_audio_play\"]'\n self.message_page.open()\n last_audio_msg = self.driver.find_element_by_css_selector('.msg_audio:last-child')\n play_button = last_audio_msg.find_element_by_xpath(PLAY_STOP_BUTTON)\n WebDriverWait(last_audio_msg, 60, 0.1).until(\n lambda d: d.find_element_by_xpath(PLAY_STOP_BUTTON)\n )\n play_button.click()\n # проверяем иконка стоп\n self.assertNotEqual(last_audio_msg.get_attribute(\"class\").rfind(\"st_play\"), -1)\n stop_button = last_audio_msg.find_element_by_xpath(PLAY_STOP_BUTTON)\n WebDriverWait(last_audio_msg, 60, 0.1).until(\n lambda d: 
d.find_element_by_xpath(PLAY_STOP_BUTTON)\n )\n stop_button.click()\n # проверяем иконка плей\n self.assertNotEqual(last_audio_msg.get_attribute(\"class\").rfind(\"st_stop\"), -1)\n", "sub_path": "tests/audio_messages_test.py", "file_name": "audio_messages_test.py", "file_ext": "py", "file_size_in_byte": 3452, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "BasicMethods.Page", "line_number": 13, "usage_type": "name"}, {"api_name": "BasicMethods.Component", "line_number": 21, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 27, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 33, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 39, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 47, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 47, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Remote", "line_number": 49, "usage_type": "call"}, {"api_name": "selenium.webdriver.DesiredCapabilities", "line_number": 51, "usage_type": "argument"}, {"api_name": "BasicMethods.AuthPage", "line_number": 54, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 77, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "136364807", "text": "import cv2\nimport numpy as np\nimport scipy.interpolate\n\n\ndef create_curve_func(points):\n \"\"\"Return a function derived from control points.\"\"\"\n if points is None:\n return None\n num_points = len(points)\n if num_points < 2:\n return None\n xs, ys = zip(*points)\n if num_points < 4:\n kind = 'linear'\n else:\n kind = 'cubic'\n return scipy.interpolate.interp1d(xs, ys, kind, bounds_error=False)\n\n\ndef create_lookup_array(func, length=256):\n \"\"\"Return a lookup for whole-number inputs to a function.\n The lookup values are clamped to [0, length - 1].\"\"\"\n if func is None:\n return None\n lookup_array = np.empty(length)\n i = 0\n while i < length:\n func_i = func(i)\n lookup_array[i] = min((max(0, func_i), length - 1))\n i += 1\n return lookup_array\n\n\ndef apply_lookup_array(lookup_array, src, dst):\n \"\"\"使用lookup数组,映射一组src到dst中\"\"\"\n if lookup_array is None:\n return\n dst[:] = lookup_array[src]\n\n\ndef create_composite_func(func0, func1):\n \"\"\"返回两个函数的阻组合函数\"\"\"\n if func0 is None:\n return func1\n if func1 is None:\n return func0\n return lambda x: func0(func1(x))\n\ndef create_flat_view(array):\n \"\"\"返回数组的一维视图\"\"\"\n flat_view = array.view()\n flat_view.shape = array.size\n return flat_view\n\n\n", "sub_path": "my_cv/04/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1383, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "scipy.interpolate.interpolate.interp1d", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.interpolate.interpolate", "line_number": 18, "usage_type": "attribute"}, {"api_name": "scipy.interpolate", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "405914596", "text": "import copy\n\nimport torch\nfrom continous_action_RL.loss_fn import ActorLoss, Retrace\nfrom 
continous_action_RL.utils import Utils\n\n\nclass Learner:\n def __init__(self,\n actor,\n critic,\n trajectory_length,\n discount_factor=0.99,\n actor_lr=2e-4,\n critic_lr=2e-4,\n entropy_regularization=1e-3,\n trust_region_coeff=0,\n gradient_clip_val=None,\n num_training_iter=100,\n update_targnets_every=20,\n expectation_samples=10,\n minibatch_size=32,\n logger=None):\n\n self.device = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n\n self.actor = actor\n self.critic = critic\n\n self.logger = logger\n self.log_step = 0\n self.trajectory_length = trajectory_length\n self.discount_factor = discount_factor\n self.target_actor = copy.deepcopy(actor).to(self.device)\n self.target_critic = copy.deepcopy(critic).to(self.device)\n Utils.freeze_net(self.target_actor)\n Utils.freeze_net(self.target_critic)\n self.actor_opt = torch.optim.Adam(params=actor.parameters(), lr=actor_lr)\n self.critic_opt = torch.optim.Adam(params=critic.parameters(), lr=critic_lr)\n\n self.num_training_iter = num_training_iter\n self.update_targnets_every = update_targnets_every\n self.expectation_samples = expectation_samples\n self.minibatch_size = minibatch_size\n self.gradient_clip_val = gradient_clip_val\n\n self.num_actions = actor.num_actions\n self.num_obs = actor.num_obs\n\n self.trust_region_coeff = trust_region_coeff\n\n self.actor_loss = ActorLoss(alpha=entropy_regularization)\n self.critic_loss = Retrace()\n\n def learn(self, replay_buffer):\n \"\"\"Update the actor and critic networks using trajectories from the replay buffer.\n\n Args:\n replay_buffer: Replay buffer containing trajectories.\n\n Returns:\n No return value\n \"\"\"\n for i in range(self.num_training_iter):\n\n # Update the target networks\n if i % self.update_targnets_every == 0:\n self.update_targnets()\n\n self.actor.train()\n self.critic.train()\n\n trajectories = replay_buffer.sample(self.minibatch_size)\n state_batch, action_batch, reward_batch, action_prob_batch \\\n = Utils.create_batches(trajectories=trajectories,\n trajectory_length=self.trajectory_length,\n minibatch_size=self.minibatch_size,\n num_obs=self.num_obs,\n num_actions=self.num_actions)\n\n # Q(a_t, s_t)\n Q = self.critic.forward(action_batch, state_batch)\n\n # Q_target(a_t, s_t)\n target_Q = self.target_critic.forward(action_batch, state_batch)\n\n # Compute 𝔼_π_target [Q(s_t,•)] with a ~ π_target(•|s_t), log(π_target(a|s))\n expected_target_Q = torch.zeros_like(reward_batch)\n mean, std = self.target_actor.forward(state_batch)\n mean = mean.to(self.device)\n std = std.to(self.device)\n for _ in range(self.expectation_samples):\n action_sample, _ = self.target_actor.action_sample(mean, std)\n expected_target_Q += self.target_critic.forward(action_sample, state_batch)\n expected_target_Q /= self.expectation_samples\n\n # log(π_target(a_t | s_t))\n target_action_log_prob = self.target_actor.get_log_prob(action_batch, mean, std)\n\n # a ~ π(•|s_t), log(π(a|s))\n m, s = self.actor.forward(state_batch)\n actions, action_log_prob = self.actor.action_sample(m, s)\n actions.to(self.device)\n if i == 0:\n old_mean = m.detach()\n old_std = s.detach()\n\n # Q(a, s_t)\n current_Q = self.critic.forward(actions, state_batch)\n\n # Critic update\n self.actor.eval()\n self.critic.train()\n self.critic_opt.zero_grad()\n\n critic_loss = self.critic_loss.forward(Q=Q.squeeze(-1),\n expected_target_Q=expected_target_Q.squeeze(-1),\n target_Q=target_Q.squeeze(-1),\n rewards=reward_batch.squeeze(-1),\n target_policy_probs=target_action_log_prob.squeeze(-1),\n 
behaviour_policy_probs=action_prob_batch.squeeze(-1),\n logger=self.logger)\n\n critic_loss.backward(retain_graph=True)\n\n # Actor update\n self.actor.train()\n self.critic.eval()\n self.actor_opt.zero_grad()\n\n actor_loss = self.actor_loss.forward(Q=current_Q.squeeze(-1),\n action_log_prob=action_log_prob.squeeze(-1))\n #\n # kl_div = self.actor_loss.kl_divergence(old_mean=old_mean, old_std=old_std, mean=m, std=s)\n # actor_loss += self.trust_region_coeff * kl_div\n actor_loss.backward()\n\n # Gradient update step with gradient clipping\n if self.gradient_clip_val is not None:\n torch.nn.utils.clip_grad_norm_(self.critic.parameters(), self.gradient_clip_val)\n torch.nn.utils.clip_grad_norm_(self.actor.parameters(), self.gradient_clip_val)\n\n # Keep track of different values\n if self.logger is not None and i % self.logger.log_every == 0:\n # self.logger.log_DNN_params(self.actor, name=\"Actor\")\n # self.logger.log_DNN_gradients(self.actor, name=\"Actor\")\n # self.logger.log_DNN_params(self.critic, name=\"Critic\")\n # self.logger.log_DNN_gradients(self.critic, name=\"Critic\")\n\n self.logger.add_scalar(scalar_value=actor_loss.item(), tag=\"Loss/Actor_loss\", global_step=self.log_step)\n self.logger.add_scalar(scalar_value=critic_loss.item(), tag=\"Loss/Critic_loss\",\n global_step=self.log_step)\n self.logger.add_scalar(scalar_value=std.mean().item(), tag=\"Action_std_mean\", global_step=self.log_step)\n self.logger.add_histogram(values=mean, tag=\"Statistics/Action_mean\", global_step=self.log_step)\n self.logger.add_histogram(values=std, tag=\"Statistics/Action_std\", global_step=self.log_step)\n if self.num_actions > 1:\n self.logger.add_histogram(values=actions[:, :, 0], tag=\"Action/x\", global_step=self.log_step)\n self.logger.add_histogram(values=actions[:, :, 1], tag=\"Action/y\", global_step=self.log_step)\n self.logger.add_histogram(values=actions[:, :, 2], tag=\"Action/z\", global_step=self.log_step)\n self.log_step += 1\n\n self.critic_opt.step()\n self.actor_opt.step()\n\n # old_mean = m.detach()\n # old_std = std.detach()\n\n def update_targnets(self):\n \"\"\"\n Update the target actor and the target critic by copying the parameter from the updated networks.\n\n Returns:\n No return value\n \"\"\"\n self.target_actor.load_state_dict(self.actor.state_dict())\n self.target_critic.load_state_dict(self.critic.state_dict())\n", "sub_path": "continous_action_RL/learner.py", "file_name": "learner.py", "file_ext": "py", "file_size_in_byte": 7733, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "torch.cuda.is_available", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 25, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 34, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 35, "usage_type": "call"}, {"api_name": "continous_action_RL.utils.Utils.freeze_net", "line_number": 36, "usage_type": "call"}, {"api_name": "continous_action_RL.utils.Utils", "line_number": 36, "usage_type": "name"}, {"api_name": "continous_action_RL.utils.Utils.freeze_net", "line_number": 37, "usage_type": "call"}, {"api_name": "continous_action_RL.utils.Utils", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.optim", 
"line_number": 39, "usage_type": "attribute"}, {"api_name": "continous_action_RL.loss_fn.ActorLoss", "line_number": 52, "usage_type": "call"}, {"api_name": "continous_action_RL.loss_fn.Retrace", "line_number": 53, "usage_type": "call"}, {"api_name": "continous_action_RL.utils.Utils.create_batches", "line_number": 75, "usage_type": "call"}, {"api_name": "continous_action_RL.utils.Utils", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.zeros_like", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 140, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 141, "usage_type": "attribute"}]} +{"seq_id": "533852622", "text": "#\n# Copyright 2018-2019 IBM Corp. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom flask import Flask, render_template, request, jsonify\nfrom chatbot import get_question, end, get_opening_message\nimport json\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--port\", default=8070)\nargs = parser.parse_args()\n\napp = Flask(__name__)\n\n# state that the conversation with the chatbot is in\nstates = {\n 1: get_question,\n 5: end\n}\n\n\n@app.route(\"/\", methods=[\"POST\", \"GET\", \"HEAD\"])\ndef chat():\n if request.method == \"POST\":\n '''Process an ongoing conversation.'''\n data = json.loads(request.data)\n input_text = data[\"input\"]\n state = int(data[\"state\"])\n\n # gets name of the next function based on state that conversation with chatbot is in\n get_next_text = states.get(state)\n response, new_state = get_next_text(input_text)\n\n return jsonify({\"response\": response, \"state\": new_state})\n\n else:\n '''Start a conversation.'''\n return render_template(\"index.html\", display_text=get_opening_message(), state=1)\n\n\nif __name__ == \"__main__\":\n\n app.run(port=args.port, debug=True, host='localhost')\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 27, "usage_type": "call"}, {"api_name": "chatbot.get_question", "line_number": 31, "usage_type": "name"}, {"api_name": "chatbot.end", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 48, "usage_type": "call"}, {"api_name": 
"flask.render_template", "line_number": 52, "usage_type": "call"}, {"api_name": "chatbot.get_opening_message", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "515480108", "text": "#!/usr/bin/env python3\nimport json\nimport boto3\nimport os\nimport logging\nimport sys\nimport comprehend\nimport transcribe\nimport promotion\n\nif os.environ.get('AWS_EXECUTION_ENV') is not None:\n print(f\"Executing in AWS environment {os.environ.get('AWS_EXECUTION_ENV')}\")\n TRANSCRIPTS = os.environ.get('TRANSCRIPTS')\n MP3S = os.environ.get('MP3S')\n COMPREHEND = os.environ.get('COMPREHEND')\nelse:\n print('Not executing in AWS environment')\n TRANSCRIPTS = 'transcribe.rightcall'\n MP3S = 'mp3.rightcall'\n COMPREHEND = 'comprehend.rightcall'\n\nprint(MP3S, TRANSCRIPTS, COMPREHEND)\n\n# Logging\nlogging.basicConfig()\nlogger = logging.getLogger()\nif os.getenv('LOG_LEVEL') == 'DEBUG':\n logger.setLevel(logging.DEBUG)\nelse:\n logger.setLevel(logging.INFO)\n\n\nclass ThrottlingException(Exception):\n pass\n\n\ndef Transcribe(event):\n \"\"\"\n S3 object created event received\n Get URI of file and send it to transcribe\n \"\"\"\n isSuccessful = False\n try:\n body = json.loads(event['Records'][0]['body'])\n except Exception as e:\n raise e\n if 'Records' not in body:\n logger.error('Records not in Body, likely s3 test event. Ignoring')\n logger.info('Doing nothing so that this message will be deleted \\\n from queue.')\n return False\n else:\n if len(body['Records']) > 1:\n logger.error('More than one record. May be missing a job here!')\n body = body['Records'][0]\n\n bucket = body['s3']['bucket']['name']\n key = body['s3']['object']['key']\n logger.info('Bucket Event: {}'.format(str(bucket)))\n if bucket == MP3S:\n logger.info('Bucket: {}, Key: {}'.format(\n str(bucket),\n str(key)))\n uri = 'https://s3-' + event['Records'][0]['awsRegion'] + \\\n '.amazonaws.com/' + bucket + '/' + key\n logger.info('URI: {}'.format(str(uri)))\n else:\n logger.info('Wrong Bucket')\n try:\n response = transcribe.transcribe_mp3(uri, TRANSCRIPTS)\n logger.info(response)\n job = response['TranscriptionJob']['TranscriptionJobName']\n\n except Exception as e:\n logger.error(str(e))\n raise e\n else:\n isSuccessful = True\n return {'success': isSuccessful,\n 'job_name': job}\n\n\ndef Comprehend(event):\n \"\"\"\n Cloudwatch 'Transcribe finished' event received.\n Go to destination bucket and get .json\n \"\"\"\n # Fetch json file related to completed transcribe event from DESTINATION\n # s3 bucket\n s3 = boto3.client('s3')\n filename = event['detail']['TranscriptionJobName'] + '.json'\n logger.info('Filename: {}'.format(str(filename)))\n try:\n logger.debug(f'Trying to get {filename} from {TRANSCRIPTS}')\n data = s3.get_object(Bucket=TRANSCRIPTS, Key=filename)\n data = data['Body'].read().decode('utf-8')\n except Exception as e:\n logger.error(str(e))\n raise e\n else:\n logger.debug('Success')\n\n try:\n data = json.loads(data)\n except Exception as e:\n logger.error(str(e))\n raise e\n\n logger.debug(f'Keys of object: {str(data.keys())}')\n\n # Give the transcript text to comprehend.py\n try:\n transcript_text = data['results']['transcripts'][0]['transcript']\n except Exception as e:\n logger.error(str(e))\n raise e\n\n if sys.getsizeof(transcript_text) <= 25:\n logger.warning(f'Transcript is empty for {filename}. 
Exiting.')\n return False\n\n comp_obj = {}\n logger.debug(f'Creating record')\n comp_obj['referenceNumber'] = event['detail']['TranscriptionJobName'] \\\n .split('--')[0]\n logger.debug(f\"\"\"Ref: {comp_obj['referenceNumber']}\"\"\")\n comp_obj['text'] = transcript_text\n logger.debug(f\"\"\"Text: {comp_obj['text']}\"\"\")\n # Get sentiment using AWS Comprehend\n sentiment = comprehend.get_sentiment(transcript_text)\n comp_obj['sentiment'] = sentiment\n logger.debug('Sentiment: {}'.format(str(comp_obj['sentiment'])))\n # Get entities\n comp_obj['entities'] = comprehend.get_entities(transcript_text)\n logger.debug(f\"\"\"Text: {comp_obj['entities']}\"\"\")\n # Get Key Phrases\n comp_obj['keyPhrases'] = comprehend.get_key_phrases(transcript_text)\n logger.debug(f\"\"\"Text: {comp_obj['keyPhrases']}\"\"\")\n # Check promotion\n results = promotion.Promotion(data)\n comp_obj['promotion'] = results['Promo']\n logger.debug('comp_obj promo: {}'.format(str(comp_obj['promotion'])))\n # Save to json file in 'comprehend.rightcall' bucket\n logger.debug(f'Finished creating record')\n logger.debug(f'Saving to {COMPREHEND} s3 bucket')\n try:\n response = s3.put_object(Body=json.dumps(comp_obj, indent=2),\n Bucket=COMPREHEND,\n Key=event['detail']['TranscriptionJobName']\n + '.json')\n except Exception as e:\n logger.error(str(e))\n raise e\n else:\n logger.debug('Success')\n return response\n\n\ndef event_type_transcribe_job_status(event):\n \"\"\"Check if event is from Cloudwatch\n If 'aws.transcribe' event from Cloudwatch return True\n Else return False\n \"\"\"\n if 'source' in event and event['source'] == 'aws.transcribe':\n logger.info('Job: {} Status: {}'.format(\n str(event['detail']['TranscriptionJobName']),\n str(event['detail']['TranscriptionJobStatus'])))\n return True\n else:\n return False\n\n\ndef event_type_sqs_s3_new_object(event):\n \"\"\"\n Check if event is a new object event from s3 delivered by sqs\n \"\"\"\n body = json.loads(event['Records'][0]['body'])\n if 'Records' not in body:\n return False\n if 'Records' in event.keys():\n if event['Records'][0]['eventSource'] == 'aws:sqs':\n return True\n else:\n return False\n return False\n\n\ndef Rightcall(event):\n \"\"\"Determine event type (S3 or Cloudwatch) and\n take appropriate action\"\"\"\n response = {}\n if event_type_transcribe_job_status(event):\n logger.info('Transcribe job event received. Sending to Comprehend.')\n response = Comprehend(event)\n elif event_type_sqs_s3_new_object(event):\n logger.info('New mp3 uploaded. Sending to Transcribe.')\n response = Transcribe(event)\n else:\n logger.info('Unknown Event Type. 
Ignoring')\n response = False\n return response\n\n\ndef lambda_handler(event, context):\n \"\"\" New MP3 file uploaded to 'mp3.rightcall'\n Event sent to this lambda function from s3 bucket\n \"\"\"\n logger.info('Received event: {}'.format(str(json.dumps(event, indent=2))))\n response = Rightcall(event)\n return response\n\n\nif __name__ == '__main__':\n transcribe_job_status_event = {\n 'version': '0',\n 'id': 'event ID',\n 'detail-type': 'Transcribe Job State Change',\n 'source': 'aws.transcribe',\n 'account': 'account ID',\n 'time': 'timestamp',\n 'region': 'region',\n 'resources': [],\n 'detail': {\n 'TranscriptionJobName': 'b76152TVd00246--3bc99ed9-e035-4316-9a05',\n 'TranscriptionJobStatus': 'COMPLETE'\n }\n }\n s3_new_object_event = {\n 'Records': [\n {\n 'eventVersion': '2.0',\n 'eventSource': 'aws:s3',\n 'awsRegion': 'eu-west-1',\n 'eventTime': '1970-01-01T00:00:00.000Z',\n 'eventName': 'ObjectCreated:Put',\n 'userIdentity': {\n 'principalId': 'AIDAJDPLRKLG7UEXAMPLE'\n },\n 'requestParameters': {\n 'sourceIPAddress': '127.0.0.1'\n },\n 'responseElements': {\n 'x-amz-request-id': 'C3D13FE58DE4C810',\n },\n 's3': {\n 's3SchemaVersion': '1.0',\n 'configurationId': 'testConfigRule',\n 'bucket': {\n 'name': 'mp3.rightcall',\n 'ownerIdentity': {\n 'principalId': 'A3NL1KOZZKExample'\n },\n 'arn': 'arn:aws:s3:::mp3.rightcall'\n },\n 'object': {\n 'key': 'jobs/bda5cbTVd10162.mp3',\n 'size': 1024,\n 'eTag': 'd41d8cd98f00b204e9800998ecf8427e',\n 'versionId': '096fKKXTRTtl3on89fVO.nfljtsv6qko',\n 'sequencer': '0055AED6DCD90281E5'\n }\n }\n }\n ]\n }\n sqs_test_event = {\n 'Records': [\n {\n 'messageId': '4227a528-e786-46a7-8906-0c6a9260ebc0',\n 'receiptHandle': 'AQEBDOvsT+wgZOTQzDFAgZiJXIREBOMC/X9lyyQXnhFbVZLSwmbGPe5ZkowPPUpIv5ZnEWAUIDt+YsDNsOlaV+uDXBh7/JKG09/m4kuH3CB8XAbXnNCH2X0HDCf+cwAN8sLxBGQtcZ5tcYyPD+ZwD55qSeeXfK6RVVBrOaLZHush75ZO102F8ciYzyQHosylXsD3lTG1HJaVsMyyAEqBZbWcO4U3p3U3NxHapKbOB80IrwdJ4wgjVGgasIEsTIH+sMlImjjowyM6hbnfvU8B6pIAXyHrnqPCCgrnAHL49DjBFIauzU4F528RVSAGbM9Trm9MkmdQNW5w8HWw8GfELjz+O0aRDl3Cylrti1wAk/Du/h9Co+F00vd/q3f+vGfmuJSuH2aIOkSUGFJ2TCPkPAQ7TA==',\n 'body': '{\\'Service\\':\\'Amazon S3\\',\\'Event\\':\\'s3:TestEvent\\',\\'Time\\':\\'2018-10-25T10:21:09.859Z\\',\\'Bucket\\':\\'mp3.rightcall\\',\\'RequestId\\':\\'B30EA916318276B2\\',\\'HostId\\':\\'+7iVNqh64HZ8/+QSK4i5CV5WtiYIGi8Xgpsb6jXYGcPPtUB0SdjECoWenb/vK3+Duuf1qENMOm8=\\'}',\n 'attributes': {\n 'ApproximateReceiveCount': '137',\n 'SentTimestamp': '1540462870055',\n 'SenderId': 'AIDAJVEO32BJMF27H2JKW',\n 'ApproximateFirstReceiveTimestamp': '1540462870055'\n },\n 'messageAttributes': {},\n 'md5OfBody': '802614776f1fdea805f34291a274c685',\n 'eventSource': 'aws:sqs',\n 'eventSourceARN': 'arn:aws:sqs:eu-west-1:868234285752:RightcallTranscriptionJobs',\n 'awsRegion': 'eu-west-1'\n }\n ]\n }\n sqs_event = {\n 'Records': [\n {\n 'messageId': '1b498e27-3a64-4847-89bd-e5026604b212',\n 'receiptHandle': 'AQEBJuUjL8Q2Z93UOS03uX/ynEUWt35ySRWqgFltcsiyv3bcSqeM+HdQc/aoNzIYHf3Z5gCtMpXRZ3EFo2Z/jCJHQAX5e8lH/Wot8VvZ3iBUzVL/GgV5Sg5mU/i+XrtDvuprzE2uvrDYMIAuzUfzoYUBJyPBn6uZRed/0gfK7XhXSeiBhRnMo1Qjjzuguiton6inehUaDUK3Nre+h5+yDDjU/ZBZmFfGi+mfUIJ6+z6n6MbxqC3yF8Oh2zOWF5QavEk9dxVAT/+t1p6wtagvXjT0od1WHZ1U8K9aUzSFePcdA+/hr0SIrpcpO1011/9iJtT837o+AvgONxsg7HEM9oUsAO/nH/erQVn45/q+Kdq57iVKHkzLNM+SJM6CQcHx9UlCdrZoT7IricptnlOGz6MTjQ==',\n 'body': 
'{\\'Records\\':[{\\'eventVersion\\':\\'2.0\\',\\'eventSource\\':\\'aws:s3\\',\\'awsRegion\\':\\'eu-west-1\\',\\'eventTime\\':\\'2018-10-26T09:30:14.124Z\\',\\'eventName\\':\\'ObjectCreated:Put\\',\\'userIdentity\\':{\\'principalId\\':\\'AWS:AIDAJFUNXNZ3MF2LQLH4O\\'},\\'requestParameters\\':{\\'sourceIPAddress\\':\\'185.31.48.30\\'},\\'responseElements\\':{\\'x-amz-request-id\\':\\'CC8C87308642EBE5\\',\\'x-amz-id-2\\':\\'+yybnPTMtJ4vslctD/uUXfeI//dT/4qbCF8c6O5QaSzAbWSsr4fB164HcgbkQT3jol9Ul2kn0g8=\\'},\\'s3\\':{\\'s3SchemaVersion\\':\\'1.0\\',\\'configurationId\\':\\'2116bf90-74fc-4c00-a60d-d3c2c5a35132\\',\\'bucket\\':{\\'name\\':\\'mp3.rightcall\\',\\'ownerIdentity\\':{\\'principalId\\':\\'A2VMRRM44J4WYZ\\'},\\'arn\\':\\'arn:aws:s3:::mp3.rightcall\\'},\\'object\\':{\\'key\\':\\'jobs/c9c4bfTOd00994.mp3\\',\\'size\\':784656,\\'eTag\\':\\'8c31f6b56802fa76346d04cdcfdeb430\\',\\'sequencer\\':\\'005BD2DEA55CFDC057\\'}}}]}',\n 'attributes': {\n 'ApproximateReceiveCount': '1',\n 'SentTimestamp': '1540546214173',\n 'SenderId': 'AIDAJVEO32BJMF27H2JKW',\n 'ApproximateFirstReceiveTimestamp': '1540546214174'\n },\n 'messageAttributes': {},\n 'md5OfBody': '94f778ba57b38e0080a56466c7727f96',\n 'eventSource': 'aws:sqs',\n 'eventSourceARN': 'arn:aws:sqs:eu-west-1:868234285752:RightcallTranscriptionJobs',\n 'awsRegion': 'eu-west-1'\n }\n ]\n }\n unhandled = {\n 'Records': [\n {\n 'messageId': '861e8a55-322d-47a0-b17b-0c291904206c',\n 'receiptHandle': 'AQEBysB8VVt2s2pAP2h9WQERLVwB7egK3VYCITBnMSYw5HaB98YGrkfMW4+of/9t7sJE9Ghd0E2fxYxFP8aYTGMF4xxKDxSfrHWczI3hiXCxScQPDppIQDWK2KrJ6jc3exzh0nJeXS2qZ9zwSjn+yjkUp5XGeGgj8CyZY/6uGC3UqBUa6+KxMdssPc7oPC1/sMribgcBNelrRDIv2p1X53ofL7afYEzuCgHVy8RO2hiFF6EPRpw7/3BnlmII4I/3zWoIbWulk50LEvlv8C53nZk+0JGxvlhUJyVLX16KKdL/GoUsmwU6h0KYFaermLdaqZi0i2znQeMw2Ye8XSvJL8jQs33IkFBCiiwLFvvuCKzQiVJnOdsfKJ3u9BF1aVglDod9zYqT2QvZxpxo6RIRjuXH+g==',\n 'body': '{\\'Service\\':\\'Amazon S3\\',\\'Event\\':\\'s3:TestEvent\\',\\'Time\\':\\'2018-10-31T14:08:15.242Z\\',\\'Bucket\\':\\'mp3.rightcall\\',\\'RequestId\\':\\'BB78EBDA2952667F\\',\\'HostId\\':\\'+8NUd+txRSmPQlapdO69n47dxJi5DQqqZ+kFV//ooNOkj0iBnFbONOSN4/XO6TfJf54exLEJtRo=\\'}',\n 'attributes': {\n 'ApproximateReceiveCount': '1',\n 'SentTimestamp': '1540994895388',\n 'SenderId': 'AIDAJVEO32BJMF27H2JKW',\n 'ApproximateFirstReceiveTimestamp': '1540994895398'\n },\n 'messageAttributes': {},\n 'md5OfBody': '90bca7ebfeff23e8c6e9458ef7a80907',\n 'eventSource': 'aws:sqs',\n 'eventSourceARN': 'arn:aws:sqs:eu-west-1:868234285752:RightcallTranscriptionJobs',\n 'awsRegion': 'eu-west-1'\n }\n ]\n }\n response = lambda_handler(transcribe_job_status_event, None)\n print(response)\n", "sub_path": "lambda_functions/rightcall/lambda_function.py", "file_name": "lambda_function.py", "file_ext": "py", "file_size_in_byte": 13874, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 15, 
"usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 28, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 30, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "transcribe.transcribe_mp3", "line_number": 70, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 90, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 104, "usage_type": "call"}, {"api_name": "sys.getsizeof", "line_number": 118, "usage_type": "call"}, {"api_name": "comprehend.get_sentiment", "line_number": 130, "usage_type": "call"}, {"api_name": "comprehend.get_entities", "line_number": 134, "usage_type": "call"}, {"api_name": "comprehend.get_key_phrases", "line_number": 137, "usage_type": "call"}, {"api_name": "promotion.Promotion", "line_number": 140, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 147, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 177, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 208, "usage_type": "call"}]} +{"seq_id": "525334602", "text": "# -*- encoding:utf8 -*-\nimport os,requests\nfrom tools import *\ndef apis_category_list(access_token,targetUrl,output='json'):\n url = 'https://www.tistory.com/apis/category/list' #카테고리 목록 조회\n params=dict()\n params['access_token'] = access_token\n params['targetUrl'] = targetUrl\n params['output'] = output #/ 출력 포맷 json: JSON출력, xml: XML출력, 그외: XML출력\n r = requests.post(url,data=params)\n return r.status_code,r.text\n\ndef apis_post_attach(access_token,targetUrl,uploadedfile,output='json'):#파일첨부\n url='https://www.tistory.com/apis/post/attach'\n fields = list()\n fields.append(['access_token', access_token])\n fields.append(['targetUrl', targetUrl])\n fields.append(['output', output])\n files = [('uploadedfile', os.path.basename(uploadedfile), open(uploadedfile, 'rb').read())]\n content_type, body = encode_multipart_formdata(fields, files)\n r = requests.post(url='https://www.tistory.com/apis/post/attach',\n data=body,\n headers = {\n 'Content-Type': content_type,\n 'Content-Length': len(body),\n })\n return r.status_code, r.text\n\ndef apis_post_write(access_token,targetUrl,title,content,visibility=2,published=None,category=0,slogan=None,tag='',output='json'):\n url='https://www.tistory.com/apis/post/write?' 
#게시글 작성\n params=dict()\n params['access_token']=access_token\n params['targetUrl']=targetUrl\n params['title']=title #POST\n params['content']=content #POST / 글 내용\n params['visibility']=visibility #글의 상태 0: 비공개, 1: 보호, 2: 공개, 3: 발행, 생략시 비공개\n if published != None:\n params['published']=published #POST / 발행시간\tUNIX_TIMESTAMP() 값을 넣을경우, 해당 날짜에 예약발행 처리\n params['category']=category #POST / 카테고리 아이디\t생략시 0(분류없음)\n if slogan != None:\n params['slogan']=slogan # \t문자 주소\n params['tag']=tag #POST / 태그\t,로 구분하며 이어서 입력\n params['output']=output #/ \t출력 포맷\tjson: JSON출력, xml: XML출력, 그외: XML출력\n r = requests.post(url,data=params)\n return r.status_code, r.text\n\n", "sub_path": "api/tistory.py", "file_name": "tistory.py", "file_ext": "py", "file_size_in_byte": 2298, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "requests.post", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "21874803", "text": "import torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom basicsr.models.archs import arch_util as arch_util\nimport pywt\n\n\nclass MSRResNet_WT_Pixel(nn.Module):\n \"\"\"Modified SRResNet.\n\n A compacted version modified from SRResNet in\n \"Photo-Realistic Single Image Super-Resolution Using a Generative\n Adversarial Network\"\n It uses residual blocks without BN, similar to EDSR.\n Currently, it supports x2, x3 and x4 upsampling scale factor.\n\n Args:\n num_in_ch (int): Channel number of inputs. Default: 3.\n num_out_ch (int): Channel number of outputs. Default: 3.\n num_feat (int): Channel number of intermediate features.\n Default: 64.\n num_block (int): Block number in the body network. Default: 16.\n upscale (int): Upsampling factor. 
Support x2, x3 and x4.\n Default: 4.\n \"\"\"\n\n def __init__(self,\n num_in_ch=3,\n num_out_ch=3,\n num_feat=64,\n num_block=16,\n upscale=4):\n super(MSRResNet_WT_Pixel, self).__init__()\n self.upscale = upscale\n\n self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)\n self.body = arch_util.make_layer(\n arch_util.ResidualBlockNoBN, num_block, num_feat=num_feat)\n\n # upsampling\n if self.upscale in [2, 3]:\n self.upconv1 = nn.Conv2d(num_feat,\n num_feat * self.upscale * self.upscale, 3,\n 1, 1)\n self.pixel_shuffle = nn.PixelShuffle(self.upscale)\n elif self.upscale == 4:\n self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)\n self.upconv2 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)\n self.pixel_shuffle = nn.PixelShuffle(2)\n\n self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)\n self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)\n\n # activation function\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n # initialization\n arch_util.default_init_weights(\n [self.conv_first, self.upconv1, self.conv_hr, self.conv_last], 0.1)\n if self.upscale == 4:\n arch_util.default_init_weights(self.upconv2, 0.1)\n\n # WT filter\n inv_filters = arch_util.create_inv_filters()\n self.register_buffer('inv_filters', inv_filters)\n\n # Normalization buffers\n self.register_buffer(\n 'shift',\n torch.Tensor([3.0]))\n self.register_buffer(\n 'scale',\n torch.Tensor([10.0]))\n\n def forward(self, x):\n # IWT x once to get LFC\n x = arch_util.iwt(x, self.inv_filters, 1)\n\n # Normalize to (0, 1) range\n x = arch_util.normalize_wt(x, self.shift, self.scale)\n assert (x.min() >= 0.0 and x.max() <= 1.0)\n \n feat = self.lrelu(self.conv_first(x))\n out = self.body(feat)\n\n if self.upscale == 4:\n out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n elif self.upscale in [2, 3]:\n out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n out = self.conv_last(self.lrelu(self.conv_hr(out)))\n\n # IWT'ed version of x with zero padding for dimensions\n base = arch_util.iwt(arch_util.zero_pad(x, x.shape[3]*self.upscale, x.device), self.inv_filters, 2)\n out += base\n return out\n", "sub_path": "basicsr/models/archs/srresnet_wt_arch.py", "file_name": "srresnet_wt_arch.py", "file_ext": "py", "file_size_in_byte": 3520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "basicsr.models.archs.arch_util.make_layer", "line_number": 38, "usage_type": "call"}, {"api_name": "basicsr.models.archs.arch_util", "line_number": 38, "usage_type": "name"}, {"api_name": "basicsr.models.archs.arch_util.ResidualBlockNoBN", "line_number": 39, "usage_type": "attribute"}, {"api_name": "basicsr.models.archs.arch_util", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.PixelShuffle", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, 
{"api_name": "torch.nn.Conv2d", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.PixelShuffle", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "basicsr.models.archs.arch_util.default_init_weights", "line_number": 59, "usage_type": "call"}, {"api_name": "basicsr.models.archs.arch_util", "line_number": 59, "usage_type": "name"}, {"api_name": "basicsr.models.archs.arch_util.default_init_weights", "line_number": 62, "usage_type": "call"}, {"api_name": "basicsr.models.archs.arch_util", "line_number": 62, "usage_type": "name"}, {"api_name": "basicsr.models.archs.arch_util.create_inv_filters", "line_number": 65, "usage_type": "call"}, {"api_name": "basicsr.models.archs.arch_util", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 74, "usage_type": "call"}, {"api_name": "basicsr.models.archs.arch_util.iwt", "line_number": 78, "usage_type": "call"}, {"api_name": "basicsr.models.archs.arch_util", "line_number": 78, "usage_type": "name"}, {"api_name": "basicsr.models.archs.arch_util.normalize_wt", "line_number": 81, "usage_type": "call"}, {"api_name": "basicsr.models.archs.arch_util", "line_number": 81, "usage_type": "name"}, {"api_name": "basicsr.models.archs.arch_util.iwt", "line_number": 96, "usage_type": "call"}, {"api_name": "basicsr.models.archs.arch_util", "line_number": 96, "usage_type": "name"}, {"api_name": "basicsr.models.archs.arch_util.zero_pad", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "237650173", "text": "from __future__ import print_function\n\nimport os\nimport time\nimport pickle\nimport random\nimport copy\nimport Queue\nimport sys\nimport cProfile\nimport pstats\nimport argparse\n\nimport pddlstream.algorithms.instantiate_task\npddlstream.algorithms.instantiate_task.FD_INSTANTIATE = False\n\nfrom pddlstream.algorithms.focused import solve_focused\nfrom pddlstream.algorithms.incremental import solve_incremental\nfrom pddlstream.language.constants import print_solution\nfrom pddlstream.utils import read\nfrom pddlstream.language.generator import from_gen_fn, from_list_fn, from_test, fn_from_constant, from_fn\nfrom pddlstream.algorithms.search import SERIALIZE\n#import pdb;pdb.set_trace()\n\nfrom gtamp_problem_environments.mover_env import Mover\nfrom generators.uniform import UniformGenerator, PaPUniformGenerator\nfrom generators.one_arm_pap_uniform_generator import OneArmPaPUniformGenerator\n\nfrom trajectory_representation.operator import Operator\nfrom planners.subplanners.motion_planner import BaseMotionPlanner\n\nfrom mover_library.utils import set_robot_config, set_obj_xytheta, visualize_path, two_arm_pick_object, two_arm_place_object, \\\n get_body_xytheta, CustomStateSaver, set_color\n\nfrom mover_library.motion_planner import rrt_region\n\nfrom openravepy import RaveSetDebugLevel, DebugLevel\nfrom trajectory_representation.trajectory import Trajectory\n\nimport numpy as np\nimport 
openravepy\n\nimport collections\nfrom collections import Counter\nfrom manipulation.primitives.display import set_viewer_options, draw_line, draw_point\nfrom manipulation.primitives.savers import DynamicEnvironmentStateSaver\n\nfrom generators.samplers.uniform_sampler import UniformSampler\nfrom generators.TwoArmPaPGenerator import TwoArmPaPGenerator\nfrom generators.one_arm_pap_uniform_generator import OneArmPaPUniformGenerator\n\nPRM_VERTICES, PRM_EDGES = pickle.load(open('prm.pkl', 'rb'))\nPRM_VERTICES = list(PRM_VERTICES) # TODO: needs to be a list rather than ndarray\n\nnum_ik_checks = 0\nnum_mp_checks = 0\n\ndef gen_pap(problem, config):\n # cache ik solutions\n ikcachename = './ikcache.pkl'\n iksolutions = collections.defaultdict(list)\n if os.path.isfile(ikcachename):\n iksolutions = pickle.load(open(ikcachename, 'r'))\n\n def fcn(o, r, s):\n global num_ik_checks\n global num_mp_checks\n\n if problem.name == 'two_arm_mover':\n abstract_state = None # TODO: figure out what goes here\n abstract_action = Operator('two_arm_pick_two_arm_place', {'object': problem.env.GetKinBody(o), 'place_region': problem.regions[r]})\n sampler = UniformSampler(problem.regions[r])\n generator = TwoArmPaPGenerator(abstract_state, abstract_action, sampler,\n n_parameters_to_try_motion_planning=config.n_mp_limit,\n n_iter_limit=config.n_iter_limit, problem_env=problem,\n pick_action_mode='ir_parameters',\n place_action_mode='object_pose')\n while True:\n s.Restore()\n prev_ik_checks = generator.n_ik_checks\n prev_mp_checks = generator.n_mp_checks\n params = generator.sample_next_point()\n num_ik_checks += generator.n_ik_checks - prev_ik_checks\n num_mp_checks += generator.n_mp_checks - prev_mp_checks\n if params['is_feasible']:\n abstract_action.continuous_parameters = params\n abstract_action.execute()\n t = CustomStateSaver(problem.env)\n yield params, t\n else:\n yield None\n\n elif problem.name == 'one_arm_mover':\n while True:\n s.Restore()\n action = Operator('one_arm_pick_one_arm_place', {'object': problem.env.GetKinBody(o), 'place_region': problem.regions[r]})\n current_region = problem.get_region_containing(problem.env.GetKinBody(o)).name\n sampler = OneArmPaPUniformGenerator(action, problem, cached_picks=(iksolutions[current_region], iksolutions[r]))\n pick_params, place_params, status = sampler.sample_next_point(max_ik_attempts=500)\n\n if status == 'HasSolution':\n action.continuous_parameters = {'pick': pick_params, 'place': place_params}\n action.execute()\n t = CustomStateSaver(problem.env)\n yield action.continuous_parameters, t\n else:\n yield None\n else:\n raise NotImplementedError\n\n return fcn\n\n\ndef get_problem(mover, goal_objects, goal_region_name, config):\n directory = os.path.dirname(os.path.abspath(__file__))\n domain_pddl = read(os.path.join(directory, 'domain.pddl'))\n stream_pddl = read(os.path.join(directory, 'stream.pddl'))\n\n constant_map = {}\n stream_map = {\n 'gen-pap': from_gen_fn(gen_pap(mover, config)),\n }\n\n obj_names = [obj.GetName() for obj in mover.objects]\n obj_poses = [get_body_xytheta(mover.env.GetKinBody(obj_name)).squeeze() for obj_name in obj_names]\n\n initial_robot_conf = get_body_xytheta(mover.robot).squeeze()\n\n if mover.name == 'two_arm_mover':\n goal_region = 'home_region'\n nongoal_regions = ['loading_region']\n elif mover.name == 'one_arm_mover':\n goal_region = mover.target_box_region.name\n nongoal_regions = ['center_shelf_region']#list(mover.shelf_regions)\n else:\n raise NotImplementedError\n\n print(goal_region, nongoal_regions, 
mover.regions.keys())\n\n init = [('Pickable', obj_name) for obj_name in obj_names]\n init += [('InRegion', obj_name, mover.get_region_containing(mover.env.GetKinBody(obj_name)).name) for obj_name in obj_names]\n init += [('Region', region) for region in nongoal_regions + [goal_region]]\n\n init += [('GoalObject', obj_name) for obj_name in goal_objects]\n init += [('NonGoalRegion', region) for region in nongoal_regions]\n\n init_state = CustomStateSaver(mover.env)\n init += [('State', init_state)]\n init += [('AtState', init_state)]\n\n # robot initialization\n init += [('EmptyArm',)]\n init += [('AtConf', initial_robot_conf)]\n init += [('BaseConf', initial_robot_conf)]\n\n # object initialization\n init += [('Pose', obj_pose) for obj_name, obj_pose in zip(obj_names, obj_poses)]\n init += [('PoseInRegion', obj_pose, 'loading_region') for obj_name, obj_pose in zip(obj_names, obj_poses)]\n init += [('AtPose', obj_name, obj_pose) for obj_name, obj_pose in zip(obj_names, obj_poses)]\n\n goal = ['and'] + [('InRegion', obj_name, goal_region_name)\n for obj_name in goal_objects]\n\n print('Num init:', Counter(fact[0] for fact in init))\n print('Goal:', goal)\n print('Streams:', sorted(stream_map))\n\n return domain_pddl, constant_map, stream_pddl, stream_map, init, goal\n\n\ndef search(mover, config, pap_model, goal_objs, goal_region_name, learned_smpler=None, reachability_clf=None):\n if learned_smpler is not None:\n raise NotImplementedError\n\n goal_objs = goal_objs[:config.n_objs_pack]\n\n print('Vertices:', len(PRM_VERTICES))\n print('Edges:', sum(len(edges) for edges in PRM_EDGES))\n\n pddlstream_problem = get_problem(mover, goal_objs, goal_region_name, config)\n stime = time.time()\n pr = cProfile.Profile()\n pr.enable()\n # planner = 'ff-lazy' # -tiebreak\n # planner = 'ff-eager-tiebreak' # -tiebreak\n planner = 'ff-wastar5'\n # planner = 'cea-wastar5' # Performs worse than ff-wastar\n # planner = 'ff-ehc' # Worse\n\n set_color(mover.objects[0], [1, 0, 0])\n solution = solve_focused(pddlstream_problem, unit_costs=True, max_time=10 * 60,\n #solution = solve_incremental(pddlstream_problem, unit_costs=True, max_time=10 * 60,\n planner=planner, debug=True, verbose=True)\n pr.disable()\n pstats.Stats(pr).sort_stats('tottime').print_stats(10)\n search_time = time.time() - stime\n plan, cost, evaluations = solution\n print(\"time: {}\".format(search_time))\n if plan is not None:\n print('Success')\n\n if config.domain == 'two_arm_mover':\n actions = [\n Operator('two_arm_pick_two_arm_place', {\n 'object': str(action.args[0]),\n 'place_region': str(action.args[1]),\n }, action.args[3])\n for action in plan\n ]\n elif config.domain == 'one_arm_mover':\n actions = [\n Operator('one_arm_pick_one_arm_place', {\n 'object': str(action.args[0]),\n 'place_region': str(action.args[1]),\n }, action.args[3])\n for action in plan\n ]\n else:\n raise NotImplementedError\n print(actions)\n return [], actions, (num_ik_checks, num_mp_checks), []\n else:\n print(\"Plan not found\")\n return [], None, (num_ik_checks, num_mp_checks), []\n\n", "sub_path": "planners/stripstream/stripstream.py", "file_name": "stripstream.py", "file_ext": "py", "file_size_in_byte": 9077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pddlstream.algorithms.instantiate_task.algorithms", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pddlstream.algorithms.instantiate_task", "line_number": 15, "usage_type": "name"}, {"api_name": "pickle.load", 
"line_number": 52, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 63, "usage_type": "call"}, {"api_name": "trajectory_representation.operator.Operator", "line_number": 71, "usage_type": "call"}, {"api_name": "generators.samplers.uniform_sampler.UniformSampler", "line_number": 72, "usage_type": "call"}, {"api_name": "generators.TwoArmPaPGenerator.TwoArmPaPGenerator", "line_number": 73, "usage_type": "call"}, {"api_name": "mover_library.utils.CustomStateSaver", "line_number": 88, "usage_type": "call"}, {"api_name": "trajectory_representation.operator.Operator", "line_number": 96, "usage_type": "call"}, {"api_name": "generators.one_arm_pap_uniform_generator.OneArmPaPUniformGenerator", "line_number": 98, "usage_type": "call"}, {"api_name": "mover_library.utils.CustomStateSaver", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 115, "usage_type": "call"}, {"api_name": "pddlstream.utils.read", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pddlstream.utils.read", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pddlstream.language.generator.from_gen_fn", "line_number": 121, "usage_type": "call"}, {"api_name": "mover_library.utils.get_body_xytheta", "line_number": 125, "usage_type": "call"}, {"api_name": "mover_library.utils.get_body_xytheta", "line_number": 127, "usage_type": "call"}, {"api_name": "mover_library.utils.CustomStateSaver", "line_number": 147, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 164, "usage_type": "call"}, {"api_name": "time.time", "line_number": 181, "usage_type": "call"}, {"api_name": "cProfile.Profile", "line_number": 182, "usage_type": "call"}, {"api_name": "mover_library.utils.set_color", "line_number": 190, "usage_type": "call"}, {"api_name": "pddlstream.algorithms.focused.solve_focused", "line_number": 191, "usage_type": "call"}, {"api_name": "pstats.Stats", "line_number": 195, "usage_type": "call"}, {"api_name": "time.time", "line_number": 196, "usage_type": "call"}, {"api_name": "trajectory_representation.operator.Operator", "line_number": 204, "usage_type": "call"}, {"api_name": "trajectory_representation.operator.Operator", "line_number": 212, "usage_type": "call"}]} +{"seq_id": "506879889", "text": "import urllib2\nimport json\nfrom encrypter import Encryption\n\n\nclass Slack:\n\n def __init__(self):\n #insert slack url\n self.url = ''\n self.encrypter = Encryption()\n\n def send(self, text, hashing=False):\n if hashing:\n data = {'text': \"{0} - Signature: {1}\".format(text, self.encrypter.encrypt(text))}\n else:\n data = {'text': text}\n\n data = json.dumps(data)\n req = urllib2.Request(self.url, data, {'Content-Type': 'application/json'})\n urllib2.urlopen(req)\n\n\n", "sub_path": "slack.py", "file_name": "slack.py", "file_ext": "py", "file_size_in_byte": 539, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "encrypter.Encryption", "line_number": 11, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 20, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "127705004", "text": "from __future__ import print_function\n\nimport logging\n\nfrom pyspark.sql.types import IntegerType, StringType, StructField, StructType\nfrom pytest import fixture\n\nfrom dbnd_spark.spark_targets import SparkDataFrameValueType\nfrom targets.value_meta import ValueMetaConf\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestHistograms:\n @fixture\n def meta_conf(self):\n conf = ValueMetaConf.enabled()\n return conf\n\n @fixture\n def numbers(self):\n return [1, 1, 3, 1, 5, None, 1, 5, 5, None]\n\n def validate_numerical_histogram_and_stats(self, value_meta, column_name):\n \"\"\" assuming numbers fixture is used \"\"\"\n assert column_name in value_meta.histograms\n histogram = value_meta.histograms[column_name]\n assert len(histogram) == 2\n assert len(histogram[0]) == 20\n assert len(histogram[1]) == 21\n assert sum(histogram[0]) == 8\n\n stats = value_meta.descriptive_stats[column_name]\n assert set(stats.keys()) == {\n \"count\",\n \"mean\",\n \"min\",\n \"25%\",\n \"50%\",\n \"75%\",\n \"max\",\n \"std\",\n \"type\",\n \"distinct\",\n \"null-count\",\n \"non-null\",\n }\n assert stats[\"count\"] == 10\n assert stats[\"non-null\"] == 8\n assert stats[\"distinct\"] == 4\n assert stats[\"min\"] == 1\n assert stats[\"max\"] == 5\n\n def test_boolean_histogram(self, spark_session, meta_conf):\n booleans = [True] * 10 + [None] * 10 + [False] * 20 + [True] * 20\n booleans = [(i,) for i in booleans]\n boolean_df = spark_session.createDataFrame(booleans, [\"boolean_column\"])\n\n value_meta = SparkDataFrameValueType().get_value_meta(boolean_df, meta_conf)\n\n histogram = value_meta.histograms[\"boolean_column\"]\n assert histogram[0] == [30, 20, 10]\n assert histogram[1] == [True, False, None]\n\n stats = value_meta.descriptive_stats[\"boolean_column\"]\n assert stats[\"count\"] == 60\n assert stats[\"type\"] == \"boolean\"\n\n def test_int_column(self, spark_session, meta_conf, numbers):\n numbers = [(i,) for i in numbers]\n df = spark_session.createDataFrame(numbers, [\"numerical_column\"])\n value_meta = SparkDataFrameValueType().get_value_meta(df, meta_conf)\n # pandas_df = df.toPandas()\n # pandas_stats, pandas_histograms = DataFrameValueType().get_histograms(pandas_df, meta_conf)\n self.validate_numerical_histogram_and_stats(value_meta, \"numerical_column\")\n stats = value_meta.descriptive_stats[\"numerical_column\"]\n assert stats[\"type\"] == \"long\"\n\n def test_float_column(self, spark_session, meta_conf, numbers):\n numbers = [(float(i),) if i else (None,) for i in numbers]\n df = spark_session.createDataFrame(numbers, [\"numerical_column\"])\n value_meta = SparkDataFrameValueType().get_value_meta(df, meta_conf)\n # pandas_df = df.toPandas()\n # pandas_stats, pandas_histograms = DataFrameValueType().get_histograms(pandas_df, meta_conf)\n self.validate_numerical_histogram_and_stats(value_meta, \"numerical_column\")\n stats = value_meta.descriptive_stats[\"numerical_column\"]\n assert stats[\"type\"] == \"double\"\n\n def test_strings_histogram(self, spark_session, meta_conf):\n strings = (\n [\"Hello World!\"] * 15\n + [None] * 5\n + [\"Ola Mundo!\"] * 15\n + [\"Shalom Olam!\"] * 
20\n + [\"Ola Mundo!\"] * 15\n )\n strings = [(i,) for i in strings]\n df = spark_session.createDataFrame(strings, [\"string_column\"])\n\n value_meta = SparkDataFrameValueType().get_value_meta(df, meta_conf)\n\n histogram = value_meta.histograms[\"string_column\"]\n assert histogram[0] == [30, 20, 15, 5]\n assert histogram[1] == [\"Ola Mundo!\", \"Shalom Olam!\", \"Hello World!\", None]\n\n stats = value_meta.descriptive_stats[\"string_column\"]\n assert set(stats.keys()) == {\n \"type\",\n \"distinct\",\n \"null-count\",\n \"non-null\",\n \"count\",\n }\n assert stats[\"count\"] == 70\n assert stats[\"non-null\"] == 65\n assert stats[\"null-count\"] == 5\n assert stats[\"distinct\"] == 4\n assert stats[\"type\"] == \"string\"\n\n def test_histogram_others(self, spark_session, meta_conf):\n strings = []\n for i in range(1, 101):\n str = \"str-{}\".format(i)\n new_strings = [str] * i\n strings.extend(new_strings)\n\n strings = [(i,) for i in strings]\n df = spark_session.createDataFrame(strings, [\"string_column\"])\n\n value_meta = SparkDataFrameValueType().get_value_meta(df, meta_conf)\n\n histogram = value_meta.histograms[\"string_column\"]\n assert len(histogram[0]) == 50 and len(histogram[1]) == 50\n assert histogram[0][0] == 100 and histogram[1][0] == \"str-100\"\n assert histogram[0][10] == 90 and histogram[1][10] == \"str-90\"\n assert histogram[0][-2] == 52 and histogram[1][-2] == \"str-52\"\n assert histogram[0][-1] == sum(range(1, 52)) and histogram[1][-1] == \"_others\"\n\n stats = value_meta.descriptive_stats[\"string_column\"]\n assert stats[\"count\"] == 5050 == sum(histogram[0])\n assert stats[\"non-null\"] == 5050\n assert stats[\"null-count\"] == 0\n assert stats[\"distinct\"] == 100\n assert stats[\"type\"] == \"string\"\n\n def test_complex_column(self, spark_session, meta_conf, numbers):\n complex = [(i, [str(i), str(i + 1)]) if i else [None] * 2 for i in numbers]\n df = spark_session.createDataFrame(\n complex, [\"numerical_column\", \"complex_column\"]\n )\n value_meta = SparkDataFrameValueType().get_value_meta(df, meta_conf)\n\n assert list(value_meta.histograms.keys()) == [\"numerical_column\"]\n assert list(value_meta.descriptive_stats.keys()) == [\"numerical_column\"]\n self.validate_numerical_histogram_and_stats(value_meta, \"numerical_column\")\n\n def test_null_int_column(self, spark_session, meta_conf):\n column_name = \"null_column\"\n nulls = [(None,) for _ in range(20)]\n schema = StructType([StructField(column_name, IntegerType(), True)])\n null_df = spark_session.createDataFrame(nulls, schema=schema)\n value_meta = SparkDataFrameValueType().get_value_meta(null_df, meta_conf)\n\n assert value_meta.histograms == {}\n stats = value_meta.descriptive_stats[column_name]\n assert stats[\"type\"] == \"integer\"\n\n def test_null_str_column(self, spark_session, meta_conf):\n column_name = \"null_column\"\n nulls = [(None,) for _ in range(20)]\n schema = StructType([StructField(column_name, StringType(), True)])\n null_df = spark_session.createDataFrame(nulls, schema=schema)\n value_meta = SparkDataFrameValueType().get_value_meta(null_df, meta_conf)\n assert value_meta.histograms[column_name] == ([20], [None])\n stats = value_meta.descriptive_stats[column_name]\n assert stats[\"type\"] == \"string\"\n\n def test_multiple_columns(self, spark_session, meta_conf, numbers):\n values = [(i, float(i), str(i), str(i)) if i else [None] * 4 for i in numbers]\n df = spark_session.createDataFrame(values, [\"ints\", \"floats\", \"str1\", \"str2\"])\n value_meta = 
SparkDataFrameValueType().get_value_meta(df, meta_conf)\n\n self.validate_numerical_histogram_and_stats(value_meta, \"ints\")\n self.validate_numerical_histogram_and_stats(value_meta, \"floats\")\n str_histogram_1 = value_meta.histograms[\"str1\"]\n str_histogram_2 = value_meta.histograms[\"str2\"]\n assert str_histogram_1[0] == [4, 3, 2, 1]\n assert str_histogram_1[1] == [\"1\", \"5\", None, \"3\"]\n assert str_histogram_1 == str_histogram_2\n", "sub_path": "plugins/dbnd-spark/tests/test_histograms.py", "file_name": "test_histograms.py", "file_ext": "py", "file_size_in_byte": 7864, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "targets.value_meta.ValueMetaConf.enabled", "line_number": 18, "usage_type": "call"}, {"api_name": "targets.value_meta.ValueMetaConf", "line_number": 18, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 16, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 21, "usage_type": "name"}, {"api_name": "dbnd_spark.spark_targets.SparkDataFrameValueType", "line_number": 60, "usage_type": "call"}, {"api_name": "dbnd_spark.spark_targets.SparkDataFrameValueType", "line_number": 73, "usage_type": "call"}, {"api_name": "dbnd_spark.spark_targets.SparkDataFrameValueType", "line_number": 83, "usage_type": "call"}, {"api_name": "dbnd_spark.spark_targets.SparkDataFrameValueType", "line_number": 101, "usage_type": "call"}, {"api_name": "dbnd_spark.spark_targets.SparkDataFrameValueType", "line_number": 131, "usage_type": "call"}, {"api_name": "dbnd_spark.spark_targets.SparkDataFrameValueType", "line_number": 152, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 161, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 161, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 161, "usage_type": "call"}, {"api_name": "dbnd_spark.spark_targets.SparkDataFrameValueType", "line_number": 163, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 172, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 172, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 172, "usage_type": "call"}, {"api_name": "dbnd_spark.spark_targets.SparkDataFrameValueType", "line_number": 174, "usage_type": "call"}, {"api_name": "dbnd_spark.spark_targets.SparkDataFrameValueType", "line_number": 182, "usage_type": "call"}]} +{"seq_id": "278864382", "text": "import os\nfrom typing import Tuple, Optional\n\nfrom utils.download import get_file\n\n\ndef download_model(model_name: str = 'archive.zip', cache_dir: Optional[str] = None) -> Tuple[str, str]:\n if cache_dir is None:\n cache_dir = os.environ['TF_MODELS_FOLDER']\n\n # on your machine download the archive and get its checksum:\n # `sha256sum ./mobilenet_v2_1.0_224.tgz`\n url = 'http://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip'\n model_archive_file_hash = 'a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc'\n label_file_hash = '93f235896748537fc71325a070ee32e9a0afda2481ceb943559325619763fa6d'\n\n archive = get_file(\n cache_dir=cache_dir,\n fname=model_name,\n origin=url,\n extract=True,\n file_hash=model_archive_file_hash)\n\n labels = get_file(\n cache_dir=cache_dir,\n fname='coco_labels.txt',\n 
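# note (added): the hash below pins the labels file, mirroring the checksum-verified model download above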
\n        origin='https://dl.google.com/coral/canned_models/coco_labels.txt',\n        file_hash=label_file_hash\n    )\n\n    return f'{cache_dir}/detect.tflite', labels\n", "sub_path": "smart-camera/app/object_detection/download_model.py", "file_name": "download_model.py", "file_ext": "py", "file_size_in_byte": 1106, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "typing.Optional", "line_number": 7, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "utils.download.get_file", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.download.get_file", "line_number": 24, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "37344520", "text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pyaudio\nimport wave\nimport os\nimport sys\n\ndef play_signal_sound():\n    question_start_signal = \"/home/kamerider/catkin_ws/src/kamerider_speech/sounds/question_start_signal.wav\"\n    chunk = 1024\n    # Open the .wav audio file\n    f = wave.open(question_start_signal, 'rb')\n    # Initialize pyaudio\n    p = pyaudio.PyAudio()\n    # Open a stream\n    stream = p.open(\n        format = p.get_format_from_width(f.getsampwidth()),\n        channels = f.getnchannels(),\n        rate = f.getframerate(),\n        output = True\n    )\n    # Read data from the audio file\n    data = f.readframes(chunk)\n\n    # Play the audio file\n    while data:\n        stream.write(data)\n        data = f.readframes(chunk)\n    \n    # Terminate the stream\n    stream.stop_stream()\n    stream.close()\n    # Close pyaudio\n    p.terminate()\n", "sub_path": "src/play_signal_sound.py", "file_name": "play_signal_sound.py", "file_ext": "py", "file_size_in_byte": 864, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "wave.open", "line_number": 13, "usage_type": "call"}, {"api_name": "pyaudio.PyAudio", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "40482693", "text": "import os\nimport mock\nimport json\nimport unittest\nimport pkg_resources\n\nimport config.configuration as config\n\nfrom main import app\n\n\nclass ConfigTests(unittest.TestCase):\n\n    def test_home_page(self):\n        request, response = app.test_client.get('/')\n\n        assert response.status == 200\n        assert response.json == {'hello': 'world'}\n\n\n    def test_health_endpoint(self):\n        request, response = app.test_client.get('/healthz')\n\n        assert response.status == 200\n        assert response.json == True\n\n\n    def test_matching_signing_hash_sends_200(self):\n        headers = {'X-Slack-Request-Timestamp': '01/01/2020'}\n        app.config.from_object(config.load_config())\n        _, response = app.test_client.post('/depart',\n                                           data=json.dumps({}),\n                                           headers=headers)\n\n        assert response.status == 200\n\n\n    def test_non_matching_hash_sends_401(self):\n        headers = {'X-Slack-Request-Timestamp': '01/01/2020'}\n        app.config.from_object(config.load_config())\n        app.config['SLACK_SIGNING_SECRET'] = 'wrong!'\n        _, response = app.test_client.post('/depart',\n                                           data=json.dumps({}),\n                                           headers=headers)\n        assert response.status == 401\n\n\n    def test_post_without_timestamp(self):\n        headers = {'empty': 'not here'}\n        app.config.from_object(config.load_config())\n        app.config['SLACK_SIGNING_SECRET'] = 'wrong!'\n        _, response = app.test_client.post('/depart',\n                                           data=json.dumps({}),\n                                           headers=headers)\n        assert response.status == 401\n", "sub_path": "tests/unit/test_basic_endpoints.py", "file_name": "test_basic_endpoints.py", "file_ext": "py", "file_size_in_byte": 1749, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "unittest.TestCase", "line_number": 12, "usage_type": "attribute"}, {"api_name": "main.app.test_client.get", "line_number": 15, "usage_type": "call"}, {"api_name": "main.app.test_client", "line_number": 15, "usage_type": "attribute"}, {"api_name": "main.app", "line_number": 15, "usage_type": "name"}, {"api_name": "main.app.test_client.get", "line_number": 22, "usage_type": "call"}, {"api_name": "main.app.test_client", "line_number": 22, "usage_type": "attribute"}, {"api_name": "main.app", "line_number": 22, "usage_type": "name"}, {"api_name": "main.app.config.from_object", "line_number": 30, "usage_type": "call"}, {"api_name": "main.app.config", "line_number": 30, "usage_type": "attribute"}, {"api_name": "main.app", "line_number": 30, "usage_type": "name"}, {"api_name": "config.configuration.load_config", "line_number": 30, "usage_type": "call"}, {"api_name": "config.configuration", "line_number": 30, "usage_type": "name"}, {"api_name": "main.app.test_client.post", "line_number": 31, "usage_type": "call"}, {"api_name": "main.app.test_client", "line_number": 31, "usage_type": "attribute"}, {"api_name": "main.app", "line_number": 31, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "main.app.config.from_object", "line_number": 40, "usage_type": "call"}, {"api_name": "main.app.config", "line_number": 40, "usage_type": "attribute"}, {"api_name": "main.app", "line_number": 40, "usage_type": "name"}, {"api_name": "config.configuration.load_config", "line_number": 40, "usage_type": "call"}, {"api_name": "config.configuration", "line_number": 40, "usage_type": "name"}, {"api_name": "main.app.config", "line_number": 41, "usage_type": "attribute"}, {"api_name": "main.app", "line_number": 41, "usage_type": "name"}, {"api_name": "main.app.test_client.post", "line_number": 42, "usage_type": "call"}, {"api_name": "main.app.test_client", "line_number": 42, "usage_type": "attribute"}, {"api_name": "main.app", "line_number": 42, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}, {"api_name": "main.app.config.from_object", "line_number": 50, "usage_type": "call"}, {"api_name": "main.app.config", "line_number": 50, "usage_type": "attribute"}, {"api_name": "main.app", "line_number": 50, "usage_type": "name"}, {"api_name": "config.configuration.load_config", "line_number": 50, "usage_type": "call"}, {"api_name": "config.configuration", "line_number": 50, "usage_type": "name"}, {"api_name": "main.app.config", "line_number": 51, "usage_type": "attribute"}, {"api_name": "main.app", "line_number": 51, "usage_type": "name"}, {"api_name": "main.app.test_client.post", "line_number": 52, "usage_type": "call"}, {"api_name": "main.app.test_client", "line_number": 52, "usage_type": "attribute"}, {"api_name": "main.app", "line_number": 52, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "610333884", "text": "# -*- coding: utf-8 -*-\n\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom ..items import LolItem\n\nx_name = '//h1[@class=\"hero-name\"]/text()'\nx_urls = '//li[starts-with(@class, \"ui-slide__panel\")]/img/@src'\n\nclass LolSpider(CrawlSpider):\n name = \"lol\"\n custom_settings = {\n 'ITEM_PIPELINES': {\n 'lol.pipelines.LolImagesPipeline': 100,\n }\n }\n allowed_domains = ['lol.duowan.com']\n base_url = 'http://lol.duowan.com/hero'\n 
\n    start_urls = ['http://lol.duowan.com/hero']\n    rules = (\n        Rule(LinkExtractor(allow=('^http://lol.duowan.com/[a-z]+/$')),\n             callback='parse_item', follow=True),\n    )\n\n    def parse_item(self, response):\n        item = LolItem(urls=[])\n        item['urls'].extend(response.xpath(x_urls).extract())\n        try:\n            item['name'] = response.xpath(x_name).extract()[0]\n        except IndexError:\n            return\n        yield item\n", "sub_path": "lol/lol/spiders/lol_spider.py", "file_name": "lol_spider.py", "file_ext": "py", "file_size_in_byte": 969, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "scrapy.spiders.CrawlSpider", "line_number": 10, "usage_type": "name"}, {"api_name": "scrapy.spiders.Rule", "line_number": 21, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 21, "usage_type": "call"}, {"api_name": "items.LolItem", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "534071108", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"Test cases for basic filter-editing operations.\"\"\"\r\nfrom iOS import script_ultils as sc\r\nimport time\r\nfrom unittest import TestCase\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom iOS import iOS_elements,base as ba\r\n\r\nclass TestEditFilter(TestCase):\r\n    \"\"\"Test class for basic filter-editing operations.\"\"\"\r\n\r\n    # Get the screen size\r\n    width, height = sc.get_size()\r\n    img_path = sc.path_lists[0]\r\n\r\n    @classmethod\r\n    def setUpClass(cls):\r\n        sc.driver.launch_app()\r\n        time.sleep(3)\r\n\r\n    @classmethod\r\n    def tearDownClass(cls):\r\n        time.sleep(3)\r\n        sc.driver.close_app()\r\n\r\n    def test_edit_filter(self):\r\n        \"\"\"Edit - Filter.\"\"\"\r\n        sc.logger.info('Edit - Filter')\r\n        fun_name = 'test_edit_filter'\r\n\r\n        sc.logger.info('Open a draft video')\r\n        ba.home_first_click('更多草稿')\r\n\r\n        sc.logger.info('Tap the draft cover')\r\n        ba.open_draft(iOS_elements.el_studio_draft)\r\n        sc.capture_screen(fun_name, self.img_path)\r\n\r\n        sc.logger.info('Tap \"镜头编辑\"')\r\n        WebDriverWait(sc.driver, 5, 1).until(\r\n            lambda x: x.find_element_by_name(\"镜头编辑\")).click()\r\n        sc.capture_screen(fun_name, self.img_path)\r\n\r\n        sc.logger.info('Tap \"滤镜\"')\r\n        WebDriverWait(sc.driver, 5, 1).until(\r\n            lambda x: x.find_element_by_name(\"滤镜\")).click()\r\n\r\n        sc.logger.info('Tap \"下载更多\"')\r\n        btn_more = '下载更多'\r\n        ba.more_download(btn_more)\r\n\r\n        sc.logger.info('Apply the filter')\r\n        ba.material_used(iOS_elements.el_store_download2)\r\n\r\n        sc.logger.info('Confirm adding')\r\n        WebDriverWait(sc.driver, 5, 1).until(\r\n            lambda x: x.find_element_by_name(iOS_elements.el_confirm_btn)).click()\r\n        sc.capture_screen(fun_name, self.img_path)\r\n\r\n        sc.logger.info('Tap the \"存草稿\" button')\r\n        WebDriverWait(sc.driver, 5, 1).until(\r\n            lambda el: el.find_element_by_name(\"存草稿\")).click()\r\n        sc.logger.info('Edit - Filter test finished')\r\n", "sub_path": "iOS/VivaVideo/test_creations/test_edit/test_filter.py", "file_name": "test_filter.py", "file_ext": "py", "file_size_in_byte": 2087, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "iOS.script_ultils.get_size", "line_number": 13, "usage_type": "call"}, {"api_name": "iOS.script_ultils", "line_number": 13, "usage_type": "name"}, {"api_name": "iOS.script_ultils.path_lists", "line_number": 14, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 14, "usage_type": "name"}, {"api_name": "iOS.script_ultils.driver.launch_app", "line_number": 18, "usage_type": "call"}, {"api_name": "iOS.script_ultils.driver", "line_number": 18, 
"usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 18, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "iOS.script_ultils.driver.close_app", "line_number": 24, "usage_type": "call"}, {"api_name": "iOS.script_ultils.driver", "line_number": 24, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 24, "usage_type": "name"}, {"api_name": "iOS.script_ultils.logger.info", "line_number": 28, "usage_type": "call"}, {"api_name": "iOS.script_ultils.logger", "line_number": 28, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 28, "usage_type": "name"}, {"api_name": "iOS.script_ultils.logger.info", "line_number": 31, "usage_type": "call"}, {"api_name": "iOS.script_ultils.logger", "line_number": 31, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 31, "usage_type": "name"}, {"api_name": "iOS.base.home_first_click", "line_number": 32, "usage_type": "call"}, {"api_name": "iOS.base", "line_number": 32, "usage_type": "name"}, {"api_name": "iOS.script_ultils.logger.info", "line_number": 34, "usage_type": "call"}, {"api_name": "iOS.script_ultils.logger", "line_number": 34, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 34, "usage_type": "name"}, {"api_name": "iOS.base.open_draft", "line_number": 35, "usage_type": "call"}, {"api_name": "iOS.base", "line_number": 35, "usage_type": "name"}, {"api_name": "iOS.iOS_elements.el_studio_draft", "line_number": 35, "usage_type": "attribute"}, {"api_name": "iOS.iOS_elements", "line_number": 35, "usage_type": "name"}, {"api_name": "iOS.script_ultils.capture_screen", "line_number": 36, "usage_type": "call"}, {"api_name": "iOS.script_ultils", "line_number": 36, "usage_type": "name"}, {"api_name": "iOS.script_ultils.logger.info", "line_number": 38, "usage_type": "call"}, {"api_name": "iOS.script_ultils.logger", "line_number": 38, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 39, "usage_type": "call"}, {"api_name": "iOS.script_ultils.driver", "line_number": 39, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 39, "usage_type": "name"}, {"api_name": "iOS.script_ultils.capture_screen", "line_number": 41, "usage_type": "call"}, {"api_name": "iOS.script_ultils", "line_number": 41, "usage_type": "name"}, {"api_name": "iOS.script_ultils.logger.info", "line_number": 43, "usage_type": "call"}, {"api_name": "iOS.script_ultils.logger", "line_number": 43, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 43, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 44, "usage_type": "call"}, {"api_name": "iOS.script_ultils.driver", "line_number": 44, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 44, "usage_type": "name"}, {"api_name": "iOS.script_ultils.logger.info", "line_number": 47, "usage_type": "call"}, {"api_name": "iOS.script_ultils.logger", "line_number": 47, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 47, "usage_type": "name"}, {"api_name": "iOS.base.more_download", "line_number": 49, "usage_type": "call"}, {"api_name": "iOS.base", "line_number": 49, "usage_type": "name"}, {"api_name": "iOS.script_ultils.logger.info", "line_number": 
51, "usage_type": "call"}, {"api_name": "iOS.script_ultils.logger", "line_number": 51, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 51, "usage_type": "name"}, {"api_name": "iOS.base.material_used", "line_number": 52, "usage_type": "call"}, {"api_name": "iOS.base", "line_number": 52, "usage_type": "name"}, {"api_name": "iOS.iOS_elements.el_store_download2", "line_number": 52, "usage_type": "attribute"}, {"api_name": "iOS.iOS_elements", "line_number": 52, "usage_type": "name"}, {"api_name": "iOS.script_ultils.logger.info", "line_number": 54, "usage_type": "call"}, {"api_name": "iOS.script_ultils.logger", "line_number": 54, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 54, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 55, "usage_type": "call"}, {"api_name": "iOS.script_ultils.driver", "line_number": 55, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 55, "usage_type": "name"}, {"api_name": "iOS.iOS_elements.el_confirm_btn", "line_number": 56, "usage_type": "attribute"}, {"api_name": "iOS.iOS_elements", "line_number": 56, "usage_type": "name"}, {"api_name": "iOS.script_ultils.capture_screen", "line_number": 57, "usage_type": "call"}, {"api_name": "iOS.script_ultils", "line_number": 57, "usage_type": "name"}, {"api_name": "iOS.script_ultils.logger.info", "line_number": 59, "usage_type": "call"}, {"api_name": "iOS.script_ultils.logger", "line_number": 59, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 59, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 60, "usage_type": "call"}, {"api_name": "iOS.script_ultils.driver", "line_number": 60, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 60, "usage_type": "name"}, {"api_name": "iOS.script_ultils.logger.info", "line_number": 62, "usage_type": "call"}, {"api_name": "iOS.script_ultils.logger", "line_number": 62, "usage_type": "attribute"}, {"api_name": "iOS.script_ultils", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "390750939", "text": "from Vector import *\nimport pygame as game\nimport numpy as np\nfrom Colisions import *\n\n\nclass Manipulator:\n\n def __init__(self, position, orientation, length, width):\n self.translation = np.array([position.x, position.y])\n self.orientation = orientation\n self.length = length # y\n self.width = width # x\n\n self.boundary_x = [-self.width/2., self.width/2.0]\n self.boundary_y = [-self.length/2., self.length/2.0]\n self.R = np.array([[np.cos(orientation), -np.sin(orientation)], [np.sin(orientation), np.cos(orientation)]])\n\n def move(self, dx, dy):\n self.translation[0] += dx\n self.translation[1] += dy\n\n def update_particle(self, particle):\n # convert particle to frame of reference of manipulator\n position_frame_manipulator = self._to_manipulator_frame(np.array([particle.position.x, particle.position.y]))\n if self.is_in(position_frame_manipulator):\n previous_frame_manipulator = self._to_manipulator_frame(np.array([particle.previous.x, particle.previous.y]))\n update_particle_manipulator(position_frame_manipulator, previous_frame_manipulator, particle.material, self.boundary_x, self.boundary_y)\n position = self._to_world_frame(position_frame_manipulator)\n previous = self._to_world_frame(previous_frame_manipulator)\n particle.position.x = position[0]\n particle.position.y = position[1]\n particle.previous.x = previous[0]\n particle.previous.y 
= previous[1]\n            return True\n        return False\n\n    def is_in(self, point):\n        if (point[0] < self.width/2.) and (point[0] > -self.width/2.) and (point[1] < self.length/2.) and (point[1] > -self.length/2.):\n            return True\n        return False\n\n    def _to_manipulator_frame(self, p):\n        return np.dot(np.transpose(self.R), p) - np.dot(np.transpose(self.R), self.translation)\n\n    def _to_world_frame(self, p):\n        return np.dot(self.R, p) + self.translation\n\n    def render(self, screen, world_to_pixel):\n        pos1 = self.translation - np.array([ self.length/2.,  self.width/2.])\n        pos2 = self.translation - np.array([ self.length/2., -self.width/2.])\n        pos3 = self.translation - np.array([-self.length/2., -self.width/2.])\n        pos4 = self.translation - np.array([-self.length/2.,  self.width/2.])\n\n        pos1 = world_to_pixel(pos1)\n        pos2 = world_to_pixel(pos2)\n        pos3 = world_to_pixel(pos3)\n        pos4 = world_to_pixel(pos4)\n\n        game.draw.line(screen, (255, 0, 0), pos1, pos2, 1)\n        game.draw.line(screen, (255, 0, 0), pos2, pos3, 1)\n        game.draw.line(screen, (255, 0, 0), pos3, pos4, 1)\n        game.draw.line(screen, (255, 0, 0), pos4, pos1, 1)\n\n", "sub_path": "src/Manipulator.py", "file_name": "Manipulator.py", "file_ext": "py", "file_size_in_byte": 2767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 63, "usage_type": "attribute"}]} +{"seq_id": "596589149", "text": "\"\"\"Server program\"\"\"\n\nimport socket\nimport sys\nimport json\nimport select\nfrom time import time\nfrom errors import IncorrectDataRecivedError, NonDictInputError\nfrom common.jimbase import JIMBase\nfrom common.json_messenger import JSONMessenger\nfrom decorator import Log, LOGGER\nfrom descriptors import CheckPort, CheckHost\nfrom metaclasses import ServerInspector\n\n\nclass JIMServer(JIMBase, metaclass=ServerInspector):\n    transport = None\n    clients = []\n    messages = []\n    # Dictionary containing user names and their corresponding sockets.\n    names = dict()\n    listen_address = CheckHost()\n    listen_port = CheckPort()\n\n    # @Log()\n    def start(self):
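\n        # Added note: announce the configuration, then bind and start the listening socket; the\n        # 0.5 s timeout keeps accept() from blocking the select loop in process().\n        # Usage sketch (mirrors main() below): srv = JIMServer(); srv.listen_port = 7777; srv.start(); then call srv.process() in a loop.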
\n        LOGGER.info(f'Server started, listening port: {self.listen_port}, '\n                    f'address from which connections are accepted: {self.listen_address}. '\n                    f'If no address is given, connections from any address are accepted.')\n        # Prepare the socket\n        self.transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.transport.bind((self.listen_address, self.listen_port))\n        self.transport.settimeout(0.5)\n\n        # Listen on the port\n        self.transport.listen(self.MAX_CONNECTIONS)\n\n    # Main loop of the server program\n    def process(self):\n        # Wait for a connection; if the timeout expires, catch the exception.\n        try:\n            client, client_address = self.transport.accept()\n        except OSError:\n            pass\n        else:\n            LOGGER.info(f'Connection established with host {client_address}')\n            self.clients.append(client)\n\n        recv_data_lst = []\n        send_data_lst = []\n        err_lst = []\n\n        # Check for waiting clients\n        try:\n            if self.clients:\n                recv_data_lst, send_data_lst, err_lst = select.select(self.clients, self.clients, [], 0)\n        except OSError:\n            pass\n\n        # Receive messages; if there are any, add them to the queue;\n        # on error, drop the client.\n        if recv_data_lst:\n            LOGGER.info('There are messages from clients')\n            for client_with_message in recv_data_lst:\n                try:\n                    messenger = JSONMessenger(client_with_message)\n                    message = messenger.get_message()\n                    self.process_client_message(messenger, message)\n                except ConnectionResetError:\n                    LOGGER.info(f'Client {client_with_message.getpeername()} '\n                                f'disconnected from the server.')\n                    self.clients.remove(client_with_message)\n                except Exception as e:\n                    LOGGER.info(f'Error while receiving a message: {e}')\n                    self.clients.remove(client_with_message)\n\n        # If there are queued messages, process each one.\n        for i in self.messages:\n            try:\n                self.process_message(i, send_data_lst)\n            except Exception:\n                LOGGER.info(f'Connection with the client named {i[self.DESTINATION]} was lost')\n                self.clients.remove(self.names[i[self.DESTINATION]].sock)\n                del self.names[i[self.DESTINATION]]\n        self.messages.clear()\n\n    @Log()\n    def process_client_message(self, messenger, message):\n        \"\"\"\n        Handler for client messages: takes a dictionary (the message\n        from the client), checks its validity, and sends back\n        a response dictionary for the client\n\n        :param messenger: instance of the JSONMessenger class\n        :param message: dictionary received from the client\n        :return: dictionary with the server's response\n        \"\"\"\n        LOGGER.info(f'Parsing a message from a client: {message}')\n\n        if self.ACTION not in message:\n            # Otherwise return Bad request\n            messenger.send_message(self.BAD_REQUEST)\n            return\n\n        if message[self.ACTION] == self.PRESENCE \\\n                and self.TIME in message and self.USER in message and self.ACCOUNT_NAME in message[self.USER]:\n            # {'action': 'presence', 'time': 1573760672.167031, 'user': {'account_name': 'Guest'}}\n            response = {self.RESPONSE: 200}\n            LOGGER.info(f'Formed a response to the client {response}')\n            messenger.send_message(response)\n            self.names[message[self.USER][self.ACCOUNT_NAME]] = messenger\n            return\n        elif message[self.ACTION] == self.MESSAGE \\\n                and self.TIME in message and self.MESSAGE_TEXT in message and self.DESTINATION in message \\\n                and self.SENDER in message:\n            self.messages.append(message)\n            LOGGER.info(f'A client message was added to the queue')\n            return\n        # Otherwise return Bad request\n        messenger.send_message(self.BAD_REQUEST)\n\n    @Log()\n    def process_message(self, message, listen_socks):\n        \"\"\"\n        Function for addressed delivery of a message to a specific client. Takes a message dictionary,\n        the list of registered users, and the listening sockets.
\n        Returns nothing.\n        :param message:\n        :param names:\n        :param listen_socks:\n        :return:\n        \"\"\"\n        if message[self.DESTINATION] in self.names:\n            if self.names[message[self.DESTINATION]].sock in listen_socks:\n                messenger = self.names[message[self.DESTINATION]]\n                messenger.send_message(message)\n                LOGGER.info(f'Sent a message to user {message[self.DESTINATION]} '\n                            f'from user {message[self.SENDER]}.')\n            else:\n                raise ConnectionError\n        else:\n            LOGGER.error(\n                f'User {message[self.DESTINATION]} is not registered on the server, '\n                f'the message cannot be delivered.')\n\n\ndef main():\n    \"\"\"\n    Load command-line parameters; if none are given, use the default values.\n    The port is handled first:\n    server.py -p 8079 -a 192.168.1.2\n    \"\"\"\n    try:\n        if '-p' in sys.argv:\n            listen_port = sys.argv[sys.argv.index('-p') + 1]\n        else:\n            listen_port = JIMBase.DEFAULT_PORT\n    except IndexError:\n        LOGGER.critical('A port number must be specified after the \\'-p\\' parameter.')\n        sys.exit(1)\n\n    # Then load which address to listen on\n\n    try:\n        if '-a' in sys.argv:\n            listen_address = sys.argv[sys.argv.index('-a') + 1]\n        else:\n            listen_address = JIMBase.DEFAULT_IP_ADDRESS\n\n    except IndexError:\n        LOGGER.critical('An address for the server to listen on must be specified after the \\'-a\\' parameter.')\n        sys.exit(1)\n\n    my_server = JIMServer()\n    my_server.listen_address = listen_address\n    my_server.listen_port = int(listen_port)\n    my_server.start()\n\n    while True:\n        my_server.process()\n\n\nif __name__ == '__main__':\n    try:\n        main()\n    except Exception as e:\n        LOGGER.critical(str(e))\n\n", "sub_path": "Lesson_2_Grishechkina/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 8270, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "common.jimbase.JIMBase", "line_number": 16, "usage_type": "name"}, {"api_name": "metaclasses.ServerInspector", "line_number": 16, "usage_type": "name"}, {"api_name": "descriptors.CheckHost", "line_number": 22, "usage_type": "call"}, {"api_name": "descriptors.CheckPort", "line_number": 23, "usage_type": "call"}, {"api_name": "decorator.LOGGER.info", "line_number": 27, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 27, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 31, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 31, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 31, "usage_type": "attribute"}, {"api_name": "select.select", "line_number": 56, "usage_type": "call"}, {"api_name": "common.json_messenger.JSONMessenger", "line_number": 66, "usage_type": "call"}, {"api_name": "decorator.LOGGER.info", "line_number": 70, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 70, "usage_type": "name"}, {"api_name": "decorator.LOGGER.info", "line_number": 74, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 74, "usage_type": "name"}, {"api_name": "decorator.LOGGER.info", "line_number": 82, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 82, "usage_type": "name"}, {"api_name": "decorator.LOGGER.info", 
"line_number": 98, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 98, "usage_type": "name"}, {"api_name": "decorator.LOGGER.info", "line_number": 109, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 109, "usage_type": "name"}, {"api_name": "decorator.LOGGER.info", "line_number": 117, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 117, "usage_type": "name"}, {"api_name": "decorator.Log", "line_number": 87, "usage_type": "call"}, {"api_name": "decorator.LOGGER.info", "line_number": 136, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 136, "usage_type": "name"}, {"api_name": "decorator.LOGGER.error", "line_number": 141, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 141, "usage_type": "name"}, {"api_name": "decorator.Log", "line_number": 122, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 153, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 154, "usage_type": "attribute"}, {"api_name": "sys.argv.index", "line_number": 154, "usage_type": "call"}, {"api_name": "common.jimbase.JIMBase.DEFAULT_PORT", "line_number": 156, "usage_type": "attribute"}, {"api_name": "common.jimbase.JIMBase", "line_number": 156, "usage_type": "name"}, {"api_name": "decorator.LOGGER.critical", "line_number": 158, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 158, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 159, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 164, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 165, "usage_type": "attribute"}, {"api_name": "sys.argv.index", "line_number": 165, "usage_type": "call"}, {"api_name": "common.jimbase.JIMBase.DEFAULT_IP_ADDRESS", "line_number": 167, "usage_type": "attribute"}, {"api_name": "common.jimbase.JIMBase", "line_number": 167, "usage_type": "name"}, {"api_name": "decorator.LOGGER.critical", "line_number": 170, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 170, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 171, "usage_type": "call"}, {"api_name": "decorator.LOGGER.critical", "line_number": 186, "usage_type": "call"}, {"api_name": "decorator.LOGGER", "line_number": 186, "usage_type": "name"}]} +{"seq_id": "44297602", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 27 11:55:04 2017\r\n@author: mo\r\n\"\"\"\r\n# In[ 0 ]: PREREQUISITES\r\n# Import libraries\r\nimport csv\r\nimport cv2\r\nimport numpy as np\r\nimport keras.backend as K\r\nfrom keras.models import Sequential, load_model\r\nfrom keras.layers import Flatten, Dense, Lambda, Dropout\r\nfrom keras.layers.convolutional import Conv2D, Cropping2D\r\nfrom keras.layers.pooling import MaxPooling2D\r\nfrom keras.optimizers import Adam\r\nfrom keras.callbacks import Callback, ModelCheckpoint, EarlyStopping, TensorBoard\r\n\r\nfrom keras.initializers import TruncatedNormal, Constant\r\n\r\nimport os\r\nfrom sklearn.utils import shuffle\r\nfrom sklearn.model_selection import train_test_split\r\nfrom datetime import datetime as dt\r\nimport time\r\nfrom tools_1712261629 import *\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nimport pandas\r\nimport random\r\nplt.style.use('ggplot')\r\n\r\n# Parameter\r\nnb_epoch = 5 # 10\r\nbatch_size = 32 # 32 50 1000\r\ndelta = 0.2\r\ninput_shape = (image_height, image_width, 3) # (160, 320, 3)\r\n\r\n# In[ 1 ]: LOAD IMAGES AND LABELS\r\n \r\n# Read driving_log.csv 
file\r\n#import csv\r\nlines = []\r\nwith open(pathData2+'driving_log.csv') as csvfile:\r\n reader = csv.reader(csvfile)\r\n for line in reader:\r\n lines.append(line)\r\n\r\n# Split dataset into two set: train, validation\r\n#from sklearn.model_selection import train_test_split\r\ntrain_lines, valid_lines = train_test_split(lines, test_size=0.2) # Do we need it?, see model.fit(xx, valid_split=0.2,xxx)\r\n\r\n# In[ X ]: BUILD MODEL TO PREDICT MY STEERING ANGLE\r\n# Generate training and validation dataset\r\ntrain_generator = generator(train_lines, batch_size=batch_size, delta=delta, image_width=image_width,image_height=image_height)\r\nvalid_generator = generator(valid_lines, batch_size=batch_size, delta=delta, image_width=image_width,image_height=image_height)\r\n\r\n\r\n# Transfer learning: end of the pre-trained nvidia model\r\nmodel = load_model(pathData0+'SPIN_model_171224_1944.h5') # load the pre-trained model\r\nmodel.pop() # slice off the end of the neuural network\r\n#model.add(Dense(1)) # add a new fully connected layer\r\n#URL: https://keras.io/initializers/\r\nmodel.add(Dense(1,\r\n kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1, seed=None),\r\n bias_initializer=Constant(value=0.1)))\r\nmodel.layers[-1].name = 'dense_4'\r\n\r\n# Freeze all layers except the last one\r\nfor i, layer in enumerate(model.layers[:-1]):\r\n layer.trainable = True # False # <- modify to freeze all layers (except the last one)\r\n #print('< {} > {}'.format(i, layer.name))\r\n# Unfreeze the last layer\r\nmodel.layers[-1].trainable = True\r\n#print('< {} > {}'.format(len(model.layers)-1, model.layers[-1].name))\r\n\r\nmodel.summary()\r\n\r\n# Compile the model\r\n#import keras.backend as K\r\nadam = Adam(lr=8.5e-4) #, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) \r\n#K.set_value(adam.lr, 0.5 * K.get_value(sgd.lr))\r\nmodel.compile(loss='mse', optimizer=adam) # 'adam')\r\n\r\n# Callbacks.Checkpoint: fault tolerance technique\r\n#from datetime import datetime as dt\r\n#import time\r\npostfix = dt.now().strftime(\"%y%m%d_%H%M\")\r\npathFile0 = pathData6+postfix+'/' \r\npathFile1 = pathFile0+'ckpt_W_{epoch:02d}_{val_loss:.2f}.hdf5'\r\n\r\ncheckpoint = ModelCheckpoint(pathFile1, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\r\nearlystop = EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='min')\r\ntensorboard= TensorBoard(log_dir=pathFile0, histogram_freq=0, batch_size=batch_size, write_graph=False, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None) \r\ncallbacks_list = [earlystop,checkpoint, tensorboard]\r\n\r\n# Callbacks.History: display training_loss and val_loss\r\n#import matplotlib.pyplot as plt\r\n#import matplotlib.image as mpimg\r\nhistory = model.fit_generator(train_generator,\r\n steps_per_epoch = len(train_lines),\r\n epochs = nb_epoch,\r\n verbose = 1,\r\n callbacks = callbacks_list,\r\n validation_data = valid_generator,\r\n validation_steps = len(valid_lines)\r\n ) \r\n\r\n# Save the trained model\r\nmodel.save_weights(pathFile0+'model_weights.h5')\r\nmodel.save(pathFile0+'model.h5')\r\n\r\n# list history.keys()\r\nprint(history.history.keys())\r\n\r\n# plot loss\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.title('model loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'valid.'], loc='upper left')\r\nplt.show()\r\n\r\n\r\n\r\n'''\r\nNext: download the model and see how well it drives the car in the simulator\r\n'''", 
"sub_path": "_01_WIP/_Coding_/_3_fine tune_spin/_f10v3_SPIN_transfer_learning_model_1712271931.py", "file_name": "_f10v3_SPIN_transfer_learning_model_1712271931.py", "file_ext": "py", "file_size_in_byte": 4709, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 31, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.initializers.TruncatedNormal", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.initializers.Constant", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "name"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.callbacks.TensorBoard", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}]} +{"seq_id": "296863135", "text": "\"\"\"\nThe implementation of rnn model.\n\nAuthor: Haotian Xue\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport attention\nfrom sen_tensor_model_class import SenTensorModel\n\n\nclass RnnModel(SenTensorModel):\n\n def __init__(self,\n train_data_set,\n test_data_set,\n hyper_parameter,\n train_requirement,\n is_gpu=torch.cuda.is_available(),\n model_save_path=\"../trained_model/rnn_model.pt\"):\n super(RnnModel, self).__init__(train_data_set,\n test_data_set,\n hyper_parameter,\n train_requirement,\n is_gpu,\n model_save_path)\n self.model = self.build_model()\n if is_gpu:\n self.model = self.model.cuda()\n self.train_test()\n\n def build_model(self):\n d_w, hidden_dim, num_layers, dropout_prob = self.extract_hyper_parameters()\n print(\"-----Start building model-----\")\n model = RnnModelHelper(d_w,\n 
torch.from_numpy(self.test_data_set.word_embedding),\n hidden_dim,\n num_layers=num_layers,\n dropout_p=dropout_prob)\n print(\"-----Finish building model-----\")\n return model\n\n def extract_hyper_parameters(self):\n return self.hyper_parameter[\"d_w\"], \\\n self.hyper_parameter[\"hidden_dim\"], \\\n self.hyper_parameter[\"num_layers\"], \\\n self.hyper_parameter[\"dropout_prob\"]\n\n\nclass RnnModelHelper(nn.Module):\n\n def __init__(self, d_w, word_emb_weight, hidden_dim, num_layers, num_classes=2, dropout_p=0.2):\n super(RnnModelHelper, self).__init__()\n self.hidden_dim = hidden_dim\n self.w2v = nn.Embedding.from_pretrained(word_emb_weight, freeze=False)\n self.rnn_layer = nn.GRU(input_size=d_w,\n hidden_size=hidden_dim,\n num_layers=num_layers,\n bias=True,\n batch_first=True,\n dropout=dropout_p,\n bidirectional=True) # shape: (batch_size, sen_len, hidden_size*2)\n self.rnn_layer.apply(self.weights_init)\n self.linear_layer = nn.Sequential( # int_shape: (batch_size, hidden_size*2)\n nn.Linear(hidden_dim * 2, hidden_dim // 2),\n nn.Tanh(),\n nn.Linear(hidden_dim // 2, num_classes) # out_shape: (batch_size, num_classes)\n )\n self.linear_layer.apply(self.weights_init)\n\n def forward(self, x):\n x = self.w2v(x)\n out, _ = self.rnn_layer(x)\n out = out[:, -1, :] # (batch_size, 1, hidden_dim*2)\n out = torch.squeeze(out, dim=1) # (batch_size, hidden_dim*2)\n m = nn.Tanh()\n out = m(out)\n out = self.linear_layer(out)\n return out\n\n # method to initialize the model weights (in order to improve performance)\n def weights_init(self, m):\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n if isinstance(m, nn.GRU) or isinstance(m, nn.LSTM) or isinstance(m, nn.RNN):\n ih = (param.data for name, param in m.named_parameters() if 'weight_ih' in name)\n hh = (param.data for name, param in m.named_parameters() if 'weight_hh' in name)\n b = (param.data for name, param in m.named_parameters() if 'bias' in name)\n # nn.init.uniform(m.embed.weight.data, a=-0.5, b=0.5)\n for t in ih:\n nn.init.xavier_uniform(t)\n for t in hh:\n nn.init.orthogonal(t)\n for t in b:\n nn.init.constant(t, 0)\n\n\nif __name__ == \"__main__\":\n from data_fetcher.dataFetcher import SenSemEvalDataSet\n print(torch.cuda.is_available())\n train_requirement = {\"num_epoch\": 10, \"batch_size\": 32, \"lr\": 3e-4}\n hyper_parameter = {\"d_w\": 50, \"hidden_dim\": 256, \"num_layers\": 2, \"dropout_prob\": 0.1}\n train_data_set = SenSemEvalDataSet(\"../data/train.txt\", \"../data/word_embedding/glove.6B.50d.txt\", 50, True)\n test_data_set = SenSemEvalDataSet(\"../data/test.txt\", \"../data/word_embedding/glove.6B.50d.txt\", 50, True)\n model = RnnModel(train_data_set, test_data_set, hyper_parameter, train_requirement)\n\n", "sub_path": "model/rnn_model.py", "file_name": "rnn_model.py", "file_ext": "py", "file_size_in_byte": 4581, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sen_tensor_model_class.SenTensorModel", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.Embedding.from_pretrained", 
"line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.GRU", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.squeeze", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn.Tanh", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.init.zeros_", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.GRU", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.nn.RNN", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.nn.init.xavier_uniform", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.init.orthogonal", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 96, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.init.constant", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 98, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 98, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 103, "usage_type": "attribute"}, {"api_name": "data_fetcher.dataFetcher.SenSemEvalDataSet", "line_number": 106, "usage_type": "call"}, {"api_name": "data_fetcher.dataFetcher.SenSemEvalDataSet", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "170042022", "text": "import json\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models.aggregates import Sum, Count\nfrom django.db.models.expressions import F, Value, Case, When\nfrom django.db.models.fields import IntegerField, CharField\nfrom django.db.models.functions import Coalesce\nfrom django.db.models.query_utils import Q\nfrom django.http.response import HttpResponse\nfrom rest_framework.generics import GenericAPIView, ListAPIView\nfrom 
rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom master_tables.models import ProjectTimeLine, OperatingUnit, Bureau, Organisation, Sector, Sdg\nfrom undp_extra_features.models import ProjectYearSummary\nfrom undp_extra_features.serializers import DownloadProjectDetailSerializer, \\\n OperatingUnitProjectSerializer, OperatingUnitIndexSerializer, BureauIndexSerializer, \\\n DonorIndexSerializer, DonorCountryIndexSerializer, FocusAreaIndexSerializer, SdgIndexSerializer, OutputSerializer\nfrom undp_projects.models import Project\nfrom undp_outputs.models import Output\nfrom utilities.config import EXCLUDED_SECTOR_CODES, CSV_UPLOAD_DIR, UPLOAD_DIR\nfrom utilities.mixins import ResponseViewMixin\n\n\nclass ProjectDetailsView(GenericAPIView, ResponseViewMixin):\n queryset = Project.objects.all()\n\n def get(self, request, *args, **kwargs):\n try:\n project = self.get_object()\n except ObjectDoesNotExist:\n return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',\n ['Project not found'])\n\n serializer = DownloadProjectDetailSerializer(project)\n return Response(serializer.data)\n\n\nclass ProjectYearSummaryView(GenericAPIView, ResponseViewMixin):\n queryset = ProjectTimeLine.objects.filter(is_active=True)\n lookup_field = 'year'\n\n def get(self, request, *args, **kwargs):\n try:\n year = self.get_object().year\n except Exception as e:\n return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',\n ['Please provide a valid year'])\n\n try:\n summary = ProjectYearSummary.objects.get(year=year).summary\n return Response(summary)\n except Exception as e:\n print(e)\n return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',\n ['Details not found'])\n\n\nclass OperatingUnitDataView(GenericAPIView, ResponseViewMixin):\n queryset = OperatingUnit.objects.filter(is_recipient=True)\n\n def get(self, request, *args, **kwargs):\n try:\n operating_unit = self.get_object()\n except Exception as e:\n return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',\n ['Operating unit not found'])\n\n try:\n projects = Project.objects.filter(operating_unit=operating_unit)\n serializer = OperatingUnitProjectSerializer(projects, many=True)\n result = {\n 'op_unit': operating_unit.iso3,\n 'iso_num': operating_unit.iso_num,\n 'projects': serializer.data\n }\n return Response(result)\n except Exception as e:\n print(e)\n return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',\n ['Details not found'])\n\n\nclass OperatingUnitIndexView(APIView, ResponseViewMixin):\n def get(self, request, *args, **kwargs):\n\n try:\n operating_units = OperatingUnit.objects.filter(is_recipient=True, area_type='country')\\\n .annotate(project_count=Count('project', distinct=True),\n funding_sources_count=Count('project__donorfundsplitup__organisation', distinct=True),\n budget_sum=Sum('project__donorfundsplitup__budget'),\n expenditure_sum=Sum('project__donorfundsplitup__expense'))\n serializer = OperatingUnitIndexSerializer(operating_units, many=True)\n\n return Response(serializer.data)\n except Exception as e:\n print(e)\n return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',\n ['Details not found'])\n\n\nclass RegionIndexView(ListAPIView, ResponseViewMixin):\n serializer_class = BureauIndexSerializer\n queryset = Bureau.objects.all()\n\n\nclass DonorIndexView(ListAPIView, ResponseViewMixin):\n serializer_class = DonorIndexSerializer\n queryset = Organisation.objects.all()\n\n\nclass DonorCountryIndexView(ListAPIView, 
ResponseViewMixin):\n serializer_class = DonorCountryIndexSerializer\n queryset = Organisation.objects.filter(Q(type_level_3__isnull=False) & ~Q(type_level_3=''))\\\n .values('type_level_3', 'level_3_name').annotate(Count('ref_id'))\n\n\nclass sdgIndexView(ListAPIView, ResponseViewMixin):\n serializer_class = SdgIndexSerializer\n queryset = Sdg.objects.all()\n\n\nclass FocusAreaIndexView(APIView, ResponseViewMixin):\n serializer_class = FocusAreaIndexSerializer\n\n def get(self, request, *args, **kwargs):\n file_path = CSV_UPLOAD_DIR + '/static/focus-area-index.json'\n data = json.load(open(file_path))\n return Response(data)\n\n\nclass CrsIndexView(APIView, ResponseViewMixin):\n\n def get(self, request, *args, **kwargs):\n file_path = CSV_UPLOAD_DIR + '/static/crs-index.json'\n data = json.load(open(file_path))\n return Response(data)\n\n\nclass SubLocationIndexView(APIView, ResponseViewMixin):\n\n def get(self, request, *args, **kwargs):\n file_path = CSV_UPLOAD_DIR + '/static/sublocation-national-index.json'\n data = json.load(open(file_path))\n return Response(data)\n\n\nclass ZipFileDownloadView(APIView, ResponseViewMixin):\n def get(self, request, *args, **kwargs):\n file_path = UPLOAD_DIR + '/undp-project-data.zip'\n zip_file = open(file_path, 'rb')\n response = HttpResponse(zip_file, content_type='application/force-download')\n # response = HttpResponse(zip_file, content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % 'undp-project-data.zip'\n return response\n\n\nclass OutputDetailsView(GenericAPIView, ResponseViewMixin):\n queryset = Output.objects.all()\n\n def get(self, request, *args, **kwargs):\n try:\n output = self.get_object()\n except ObjectDoesNotExist:\n return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',\n ['Project not found'])\n\n serializer = OutputSerializer(output)\n return Response(serializer.data)\n\n", "sub_path": "transparencyportal/transparencyportal/undp_extra_features/download_api_views.py", "file_name": "download_api_views.py", "file_ext": "py", "file_size_in_byte": 6755, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "rest_framework.generics.GenericAPIView", "line_number": 25, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 25, "usage_type": "name"}, {"api_name": "undp_projects.models.Project.objects.all", "line_number": 26, "usage_type": "call"}, {"api_name": "undp_projects.models.Project.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "undp_projects.models.Project", "line_number": 26, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 31, "usage_type": "name"}, {"api_name": "undp_extra_features.serializers.DownloadProjectDetailSerializer", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 39, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 39, "usage_type": "name"}, {"api_name": "master_tables.models.ProjectTimeLine.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "master_tables.models.ProjectTimeLine.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "master_tables.models.ProjectTimeLine", "line_number": 40, "usage_type": "name"}, {"api_name": 
"undp_extra_features.models.ProjectYearSummary.objects.get", "line_number": 51, "usage_type": "call"}, {"api_name": "undp_extra_features.models.ProjectYearSummary.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "undp_extra_features.models.ProjectYearSummary", "line_number": 51, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 52, "usage_type": "call"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 59, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 59, "usage_type": "name"}, {"api_name": "master_tables.models.OperatingUnit.objects.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "master_tables.models.OperatingUnit.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "master_tables.models.OperatingUnit", "line_number": 60, "usage_type": "name"}, {"api_name": "undp_projects.models.Project.objects.filter", "line_number": 70, "usage_type": "call"}, {"api_name": "undp_projects.models.Project.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "undp_projects.models.Project", "line_number": 70, "usage_type": "name"}, {"api_name": "undp_extra_features.serializers.OperatingUnitProjectSerializer", "line_number": 71, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 77, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 84, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 84, "usage_type": "name"}, {"api_name": "master_tables.models.OperatingUnit.objects.filter", "line_number": 88, "usage_type": "call"}, {"api_name": "master_tables.models.OperatingUnit.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "master_tables.models.OperatingUnit", "line_number": 88, "usage_type": "name"}, {"api_name": "django.db.models.aggregates.Count", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models.aggregates.Count", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models.aggregates.Sum", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models.aggregates.Sum", "line_number": 92, "usage_type": "call"}, {"api_name": "undp_extra_features.serializers.OperatingUnitIndexSerializer", "line_number": 93, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 95, "usage_type": "call"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 102, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 102, "usage_type": "name"}, {"api_name": "undp_extra_features.serializers.BureauIndexSerializer", "line_number": 103, "usage_type": "name"}, {"api_name": "master_tables.models.Bureau.objects.all", "line_number": 104, "usage_type": "call"}, {"api_name": "master_tables.models.Bureau.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "master_tables.models.Bureau", "line_number": 104, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 107, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 107, "usage_type": "name"}, {"api_name": "undp_extra_features.serializers.DonorIndexSerializer", "line_number": 108, "usage_type": "name"}, {"api_name": "master_tables.models.Organisation.objects.all", "line_number": 109, "usage_type": "call"}, {"api_name": "master_tables.models.Organisation.objects", "line_number": 109, 
"usage_type": "attribute"}, {"api_name": "master_tables.models.Organisation", "line_number": 109, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 112, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 112, "usage_type": "name"}, {"api_name": "undp_extra_features.serializers.DonorCountryIndexSerializer", "line_number": 113, "usage_type": "name"}, {"api_name": "master_tables.models.Organisation.objects.filter", "line_number": 114, "usage_type": "call"}, {"api_name": "master_tables.models.Organisation.objects", "line_number": 114, "usage_type": "attribute"}, {"api_name": "master_tables.models.Organisation", "line_number": 114, "usage_type": "name"}, {"api_name": "django.db.models.query_utils.Q", "line_number": 114, "usage_type": "call"}, {"api_name": "django.db.models.aggregates.Count", "line_number": 115, "usage_type": "call"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 118, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 118, "usage_type": "name"}, {"api_name": "undp_extra_features.serializers.SdgIndexSerializer", "line_number": 119, "usage_type": "name"}, {"api_name": "master_tables.models.Sdg.objects.all", "line_number": 120, "usage_type": "call"}, {"api_name": "master_tables.models.Sdg.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "master_tables.models.Sdg", "line_number": 120, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 123, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 123, "usage_type": "name"}, {"api_name": "undp_extra_features.serializers.FocusAreaIndexSerializer", "line_number": 124, "usage_type": "name"}, {"api_name": "utilities.config.CSV_UPLOAD_DIR", "line_number": 127, "usage_type": "name"}, {"api_name": "json.load", "line_number": 128, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 129, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 132, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 132, "usage_type": "name"}, {"api_name": "utilities.config.CSV_UPLOAD_DIR", "line_number": 135, "usage_type": "name"}, {"api_name": "json.load", "line_number": 136, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 137, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 140, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 140, "usage_type": "name"}, {"api_name": "utilities.config.CSV_UPLOAD_DIR", "line_number": 143, "usage_type": "name"}, {"api_name": "json.load", "line_number": 144, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 145, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 148, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 148, "usage_type": "name"}, {"api_name": "utilities.config.UPLOAD_DIR", "line_number": 150, "usage_type": "name"}, {"api_name": "django.http.response.HttpResponse", "line_number": 152, "usage_type": "call"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 158, "usage_type": "name"}, {"api_name": "utilities.mixins.ResponseViewMixin", "line_number": 158, "usage_type": "name"}, {"api_name": "undp_outputs.models.Output.objects.all", "line_number": 159, 
"usage_type": "call"}, {"api_name": "undp_outputs.models.Output.objects", "line_number": 159, "usage_type": "attribute"}, {"api_name": "undp_outputs.models.Output", "line_number": 159, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 164, "usage_type": "name"}, {"api_name": "undp_extra_features.serializers.OutputSerializer", "line_number": 168, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "333459585", "text": "# imports and tools\nfrom pyspark.mllib.regression import LabeledPoint\nfrom pyspark.mllib.tree import DecisionTree, DecisionTreeModel\nfrom pyspark.mllib.util import MLUtils\nfrom pyspark.sql import SparkSession\nimport os\nimport numpy as np\n\n# create SparkContext object\nspark = SparkSession.builder.appName(\"Unit04_DecisionTrees\").getOrCreate()\nsc = spark.sparkContext\n\ndata = MLUtils.loadLibSVMFile(sc, '{}/data/mllib/sample_libsvm_data.txt'.format(os.environ['SPARK_HOME']))\ndata.take(1)\n\n# set aside test data\n(trainingData, test_data) = data.randomSplit([0.7, 0.3])\n\n## standard decicion tree\n# build decision tree model\nmodel = DecisionTree.trainClassifier(trainingData\n ,numClasses = 2\n ,objectFeaturesInfo = {}\n ,impurity = 'gini'\n ,maxDepth = 5\n ,maxBins = 32\n )\n\n# prediction and test error\npredictions = model.predict(test_data.map(lambda x: x.features))\nlabelsAndPredictions = test_data.map(lambda lp: lp.label).zip(predictions)\n\n# count incorrect\ntestError = labelsAndPredictions.filter(lambda lp: lp[0] != lp[1]).count() / float(test_data.count())\nprint('test error: {}'.format(testError))\nprint(model.toDebugString())\n\n# save model\nmodel.save(sc, './decisionTreeModel')\n# reload = DecisionTreeModel.load(sc, './decisionTreeModel')\n\n## random forest\nfrom pyspark.mllib.tree import RandomForest\nfrom pyspark.mllib.util import MLUtils\n\n# build model\nmodel = RandomForest.trainClassifier(trainingData\n ,numClasses = 2\n ,objectFeaturesInfo = {}\n ,numTrees = 3\n ,featureSubsetStrategy = 'auto'\n ,impurity = 'gini'\n ,maxDepth = 4\n ,maxBins = 32\n )\n\n# prediction and test error\npredictions = model.predict(test_data.map(lambda x: x.features))\nlabelsAndPredictions = test_data.map(lambda lp: lp.label).zip(predictions)\n\n# count incorrect\ntestError = labelsAndPredictions.filter(lambda lp: lp[0] != lp[1]).count() / float(test_data.count())\nprint('test error: {}'.format(testError))\nprint(model.toDebugString())\n\n# save model\nmodel.save(sc, './randomForestModel')\n# reload = DecisionTreeModel.load(sc, './randomForestModel')\n\n", "sub_path": "academics/SparkML/DecisionTrees.py", "file_name": "DecisionTrees.py", "file_ext": "py", "file_size_in_byte": 2454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 10, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 10, "usage_type": "name"}, {"api_name": "pyspark.mllib.util.MLUtils.loadLibSVMFile", "line_number": 13, "usage_type": "call"}, {"api_name": "pyspark.mllib.util.MLUtils", "line_number": 13, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pyspark.mllib.tree.DecisionTree.trainClassifier", "line_number": 21, "usage_type": "call"}, {"api_name": 
"pyspark.mllib.tree.DecisionTree", "line_number": 21, "usage_type": "name"}, {"api_name": "pyspark.mllib.tree.RandomForest.trainClassifier", "line_number": 47, "usage_type": "call"}, {"api_name": "pyspark.mllib.tree.RandomForest", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "296652020", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\nfrom .models import Transform, Trans, CoordOper\nfrom .forms import PostForm, EllipsoidForm, TransNew\nfrom django.http.response import HttpResponse\nfrom django.template.loader import get_template\nfrom django.views.generic.edit import DeleteView\nfrom django.core import management\nfrom django.core.management import call_command\n\n#from django.urls import reverse_lazy\n#from .scripts.\n\n# Create your views here.\ndef about (request):\n return render(request, 'about.html')\ndef post_list(request):\n posts = Transform.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n return render(request, 'calculation/post_list.html', {'posts': posts})\ndef post_detail(request, pk):\n post = get_object_or_404(Transform, pk=pk)\n return render(request, 'calculation/post_detail.html', {'post': post})\ndef post_new(request):\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm()\n return render(request, 'calculation/post_edit.html', {'form': form})\n\n# My first real view for coordinate transformation\ndef coord_transf(request, init_coo):\n return render(request, init_coo)\n\n# There are three methods to create pages\ndef basic_one(request):\n view = 'basic_one (my one)'\n html = \"\" % view\n return HttpResponse(html)\ndef template_two(request):\n view = 'template_two (my second)'\n t = get_template('my_view.html')\n html = t.render(Context({'name': view}))\n return HttpResponse(html)\ndef template_three(request):\n new_view = 'template_three (my 3-d)'\n return render(request, 'my_view.html', {'name': new_view})\n\ndef transformations(request):\n transs = Trans.objects.order_by('-transDate')\n return render(request, 'calculation/transformations.html', {'transformations': transs})\n\ndef transform(request, tra_id):\n transf_post = get_object_or_404(Trans, pk=tra_id)\n return render(request, 'calculation/transformation.html', {'transformation': transf_post})\n\ndef trans_new(request):\n if request.method == \"POST\":\n form = TransNew(request.POST)\n if form.is_valid():\n trans = form.save(commit=False)\n trans.transAuthor = request.user\n trans.transDate = timezone.now()\n# trans.transOutData = trans.transInData + \"kdsjghdfks\"\n trans.save()\n call_command('coo_conv', trans.pk)\n \n return redirect('trans_get', tra_id=trans.pk)\n else:\n form = TransNew()\n return render(request, 'calculation/transformation_new.html', {'form': form}) \n\ndef trans_edit(request, tra_id):\n trans = get_object_or_404(Trans, pk=tra_id)\n if request.method == \"POST\":\n form = TransNew(request.POST, instance=trans)\n if form.is_valid():\n trans = form.save(commit=False)\n trans.transAuthor = request.user\n trans.transDate = timezone.now()\n trans.save()\n call_command('coo_conv', trans.pk)\n return redirect('trans_get', tra_id=trans.pk)\n else:\n form = TransNew(instance=trans)\n return render(request, 'calculation/transformation_new.html', {'form': form})\n\"\"\"\ndef trans_del 
(DeleteView):\n model = Trans\n success_url = reverse_lazy(transformations)\n\"\"\"\ndef trans_del(request, tra_id):\n trans = get_object_or_404(Trans, pk=tra_id)\n if request.method == \"POST\":\n form = TransNew(request.POST, instance=trans)\n if form.is_valid():\n trans.delete()\n return redirect(transformations)\n else:\n form = TransNew(instance=trans)\n return render(request, 'calculation/transformation_del.html', {'form': form})\n\ndef ell_new(request):\n if request.method == \"POST\":\n form = EllipsoidForm(request.POST or None)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect('ell_detail', pk=post.pk)\n else:\n form = EllipsoidForm() # was PostForm(), a copy-paste slip from post_new\n return render(request, 'calculation/ell_edit.html', {'form': form})\n\ndef csmanager(request):\n coosyss = CoordOper.objects.filter(coordOperDate__lte=timezone.now()).order_by('coordOperDate')\n return render(request, 'calculation/cs_manager.html', {'csmanager': coosyss})\n\ndef csys(request, csys_id):\n cooSys = get_object_or_404(CoordOper, pk=csys_id)\n return render(request, 'calculation/csys.html', {'cooSys': cooSys}) ", "sub_path": "calculation/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Transform.objects.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Transform.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Transform", "line_number": 18, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 18, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Transform", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 25, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 29, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 33, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.http.response.HttpResponse", "line_number": 44, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 47, "usage_type": "call"}, {"api_name": "django.http.response.HttpResponse", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Trans.objects.order_by", "line_number": 55, "usage_type": "call"}, {"api_name": "models.Trans.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "models.Trans", "line_number": 55, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 56, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 59, "usage_type": "call"}, {"api_name": 
"models.Trans", "line_number": 59, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 60, "usage_type": "call"}, {"api_name": "forms.TransNew", "line_number": 64, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 68, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 68, "usage_type": "name"}, {"api_name": "django.core.management.call_command", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 73, "usage_type": "call"}, {"api_name": "forms.TransNew", "line_number": 75, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 76, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 79, "usage_type": "call"}, {"api_name": "models.Trans", "line_number": 79, "usage_type": "argument"}, {"api_name": "forms.TransNew", "line_number": 81, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 85, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 85, "usage_type": "name"}, {"api_name": "django.core.management.call_command", "line_number": 87, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 88, "usage_type": "call"}, {"api_name": "forms.TransNew", "line_number": 90, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 91, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 98, "usage_type": "call"}, {"api_name": "models.Trans", "line_number": 98, "usage_type": "argument"}, {"api_name": "forms.TransNew", "line_number": 100, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 103, "usage_type": "call"}, {"api_name": "forms.TransNew", "line_number": 105, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 106, "usage_type": "call"}, {"api_name": "forms.EllipsoidForm", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 115, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 117, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 118, "usage_type": "call"}, {"api_name": "models.CoordOper.objects.filter", "line_number": 121, "usage_type": "call"}, {"api_name": "models.CoordOper.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "models.CoordOper", "line_number": 121, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 121, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 121, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 122, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 125, "usage_type": "call"}, {"api_name": "models.CoordOper", "line_number": 125, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "80477458", "text": "from http.server import BaseHTTPRequestHandler, HTTPServer\nfrom urllib.parse import urlparse, parse_qs\n\nclass MyServer(BaseHTTPRequestHandler):\n def do_GET(self):\n parsed = urlparse(self.path)\n params = parse_qs(parsed.query)\n\n if 'user' in params:\n user = params['user'][0] #there might be more than 1 value\n else:\n user = 'unknown user'\n\n msg = \"Hello \"+user+\"\"\n \n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n 
self.wfile.write(bytes(msg, \"utf-8\"))\ntry:\n myServer = HTTPServer((\"localhost\", 80), MyServer)\n myServer.serve_forever()\nexcept KeyboardInterrupt:\n myServer.server_close()\n", "sub_path": "src/lessonXX.WebServer/demo/username.py", "file_name": "username.py", "file_ext": "py", "file_size_in_byte": 761, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 4, "usage_type": "name"}, {"api_name": "urllib.parse.urlparse", "line_number": 6, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 7, "usage_type": "call"}, {"api_name": "http.server.HTTPServer", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "471472977", "text": "from django.conf.urls import patterns, include, url\nfrom wsDay.views import login, index, upload, up, more, welcome, users_csv, ip_csv, stars\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\n#from dajaxice.core import dajaxice_autodiscover, dajaxice_config\n#dajaxice_autodiscover()\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'sleepDay.views.home', name='home'),\n # url(r'^sleepDay/', include('sleepDay.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n url('^$', welcome),\n url('^(?P[\\d\\.]+)$', index),\n url('^login$', login),\n url('^upload$', upload),\n url('^limijiaoyinrongyuchupinusers$', users_csv),\n url('^limijiaoyinqinqingfengxianip$', ip_csv),\n url('^stars$', stars),\n url('^up$', up),\n url('^more$', more),\n #url(dajaxice_config.dajaxice_url, include('dajaxice.urls')), \n)\n", "sub_path": "sleepDay/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1182, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "wsDay.views.welcome", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "wsDay.views.index", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "wsDay.views.login", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "wsDay.views.upload", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "wsDay.views.users_csv", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "wsDay.views.ip_csv", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "wsDay.views.stars", "line_number": 28, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "wsDay.views.up", "line_number": 29, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", 
"line_number": 30, "usage_type": "call"}, {"api_name": "wsDay.views.more", "line_number": 30, "usage_type": "argument"}]} +{"seq_id": "115047318", "text": "\"\"\"\nTools for the spatial analysis of neighborhood change\n\"\"\"\n\nfrom exceptions import TypeError\n\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport osmnx as ox\nimport pandas as pd\nfrom libpysal import attach_islands\nfrom libpysal.weights.Contiguity import Queen, Rook\nfrom libpysal.weights.Distance import KNN\n\nfrom .cluster import (\n affinity_propagation,\n gaussian_mixture,\n kmeans,\n max_p,\n skater,\n spectral,\n spenc,\n ward,\n ward_spatial,\n)\n\nstore = pd.HDFStore(\"oslnap/data/us_geo.h5\", \"r\")\n\nstates = store[\"states\"]\nstates = gpd.GeoDataFrame(states)\nstates[~states.geoid.isin([\"60\", \"66\", \"69\", \"72\", \"78\"])]\nstates.crs = {\"init\": \"epsg:4326\"}\nstates = states.set_index(\"geoid\")\n\ncounties = store[\"counties\"]\ncounties = gpd.GeoDataFrame(counties)\ncounties.crs = {\"init\": \"epsg:4326\"}\ncounties = counties.set_index(\"geoid\")\n\ntracts = store[\"tracts\"]\ntracts = gpd.GeoDataFrame(tracts)\ntracts.crs = {\"init\": \"epsg:4326\"}\ntracts = tracts.set_index(\"geoid\")\n\ndata = pd.HDFStore(\"oslnap/data/data.h5\", \"r\")\nif dataset.isin([\"ltdb\", \"ncdb\", \"nhgis\"]):\n df = data[dataset]\nelif dataset == \"external\":\n df = dataset\nelse:\n raise ValueError(\"dataset must be one of 'ltdb', 'ncdb', 'nhgis', 'external'\")\n\n\nclass Metro(object):\n\n \"\"\"\n A class that stores neighborhood data and analytics for a metropolitan\n region\n \"\"\"\n\n def __init__(self, name, boundary):\n\n self.name = name\n self.boundary = boundary\n self.tracts = tracts[\n tracts.set_geometry(\"point\").within(self.boundary.unary_union)\n ]\n self.tracts = ox.project_gdf(self.tracts)\n self.counties = ox.project_gdf(\n counties[counties.index.isin(np.unique(self.tracts.index.str[0:5]))]\n )\n self.states = ox.project_gdf(\n states[states.index.isin(np.unique(self.tracts.index.str[0:2]))]\n )\n self.data = df[df.index.isin(self.tracts.index)]\n\n def plot(self, column=None, year=2015, ax=None, plot_counties=True, **kwargs):\n \"\"\"\n convenience function for plotting tracts in the metro area\n \"\"\"\n if ax is not None:\n ax = ax\n else:\n fig, ax = plt.subplots(figsize=(15, 15))\n colname = column.replace(\"_\", \" \")\n colname = colname.title()\n plt.title(self.name + \": \" + colname + \", \" + str(year), fontsize=20)\n plt.axis(\"off\")\n\n ax.set_aspect(\"equal\")\n plotme = self.tracts.join(self.data[self.data.year == year], how=\"left\")\n plotme = plotme.dropna(subset=[column])\n plotme.plot(column=column, alpha=0.8, ax=ax, **kwargs)\n\n if plot_counties is True:\n self.counties.plot(\n edgecolor=\"#5c5353\", linewidth=0.8, facecolor=\"none\", ax=ax, **kwargs\n )\n\n return ax\n\n def cluster(\n self,\n n_clusters=6,\n method=None,\n best_model=False,\n columns=None,\n preference=-1000,\n damping=0.8,\n verbose=False,\n **kwargs\n ):\n \"\"\"\n Create a geodemographic typology by running a cluster analysis on the\n metro area's neighborhood attributes\n\n Parameters\n ----------\n n_clusters : int\n the number of clusters to derive\n method : str\n the clustering algorithm used to identify neighborhood types\n columns : list-like\n subset of columns on which to apply the clustering\n\n Returns\n -------\n DataFrame\n\n \"\"\"\n data = self.data.copy()\n allcols = columns + [\"year\"]\n data = data[allcols]\n data.dropna(inplace=True)\n 
data[columns] = data.groupby(\"year\")[columns].apply(\n lambda x: (x - x.mean()) / x.std(ddof=0)\n )\n data\n # option to autoscale the data w/ mix-max or zscore?\n specification = {\n \"ward\": ward,\n \"kmeans\": kmeans,\n \"ap\": affinity_propagation,\n \"gm\": gaussian_mixture,\n \"spectral\": spectral,\n }\n model = specification[method](\n data.drop(columns=\"year\"),\n n_clusters=n_clusters,\n preference=preference,\n damping=damping,\n best_model=best_model,\n verbose=verbose,\n **kwargs\n )\n labels = model.labels_.astype(str)\n clusters = pd.DataFrame(\n {method: labels, \"year\": data.year, \"geoid\": data.index}\n )\n clusters[\"joinkey\"] = clusters.index + clusters.year.astype(str)\n clusters = clusters.drop(columns=\"year\")\n geoid = self.data.index\n self.data[\"joinkey\"] = self.data.index + self.data.year.astype(str)\n if method in self.data.columns:\n self.data.drop(columns=method, inplace=True)\n self.data = self.data.merge(clusters, on=\"joinkey\", how=\"left\")\n self.data[\"geoid\"] = geoid\n self.data.set_index(\"geoid\", inplace=True)\n\n def cluster_spatial(\n self,\n n_clusters=6,\n weights_type=\"rook\",\n method=None,\n best_model=False,\n columns=None,\n threshold_variable=None,\n threshold=10,\n **kwargs\n ):\n \"\"\"\n Create a *spatial* geodemographic typology by running a cluster\n analysis on the metro area's neighborhood attributes and including a\n contiguity constraint\n\n Parameters\n ----------\n n_clusters : int\n the number of clusters to derive\n weights_type : str 'queen' or 'rook'\n spatial weights matrix specification\n method : str\n the clustering algorithm used to identify neighborhood types\n columns : list-like\n subset of columns on which to apply the clustering\n\n Returns\n -------\n DataFrame\n\n \"\"\"\n\n if threshold_variable == \"count\":\n allcols = columns + [\"year\"]\n data = self.data[allcols].copy()\n data = data.dropna(how=\"any\")\n data[columns] = data.groupby(\"year\")[columns].apply(\n lambda x: (x - x.mean()) / x.std(ddof=0)\n )\n\n elif threshold_variable is not None:\n threshold_var = data[threshold_variable]\n allcols = list(columns).remove(threshold_variable) + [\"year\"]\n data = self.data[allcols].copy()\n data = data.dropna(how=\"any\")\n data[columns] = data.groupby(\"year\")[columns].apply(\n lambda x: (x - x.mean()) / x.std(ddof=0)\n )\n\n else:\n allcols = columns + [\"year\"]\n data = self.data[allcols].copy()\n data = data.dropna(how=\"any\")\n data[columns] = data.groupby(\"year\")[columns].apply(\n lambda x: (x - x.mean()) / x.std(ddof=0)\n )\n\n tracts = self.tracts.copy()\n\n def _build_data(data, tracts, year, weights_type):\n df = data.loc[data.year == year].copy()\n tracts = tracts.copy()[tracts.index.isin(df.index)]\n weights = {\"queen\": Queen, \"rook\": Rook}\n w = weights[weights_type].from_dataframe(\n tracts.reset_index(), idVariable=\"geoid\"\n )\n # drop islands from dataset and rebuild weights\n df.drop(index=w.islands, inplace=True)\n tracts.drop(index=w.islands, inplace=True)\n w = weights[weights_type].from_dataframe(\n tracts.reset_index(), idVariable=\"geoid\"\n )\n knnw = KNN.from_dataframe(tracts, k=1)\n\n return df, w, knnw\n\n years = [1980, 1990, 2000, 2010, 2015]\n annual = []\n for year in years:\n df, w, knnw = _build_data(data, tracts, year, weights_type)\n annual.append([df, w, knnw])\n\n datasets = dict(zip(years, annual))\n\n specification = {\n \"spenc\": spenc,\n \"ward_spatial\": ward_spatial,\n \"skater\": skater,\n \"max_p\": max_p,\n }\n\n clusters = []\n for key, 
val in datasets.items():\n if threshold_variable == \"count\":\n threshold_var = np.ones(len(val[0]))\n val[1] = attach_islands(val[1], val[2])\n elif threshold_variable is not None:\n threshold_var = threshold_var[threshold_var.index.isin(val[0].index)].values # align on threshold_var's own index; `threshold` is a scalar\n val[1] = attach_islands(val[1], val[2])\n else:\n threshold_var = None\n model = specification[method](\n val[0].drop(columns=\"year\"),\n w=val[1],\n n_clusters=n_clusters,\n threshold_variable=threshold_var,\n threshold=threshold,\n **kwargs\n )\n labels = model.labels_.astype(str)\n labels = pd.DataFrame(\n {method: labels, \"year\": val[0].year, \"geoid\": val[0].index}\n )\n clusters.append(labels)\n\n clusters = pd.concat(clusters)\n clusters = clusters.set_index(\"geoid\") # set_index returns a new frame; the result was discarded before\n clusters[\"joinkey\"] = clusters.index + clusters.year.astype(str)\n clusters = clusters.drop(columns=\"year\")\n geoid = self.data.index\n self.data[\"joinkey\"] = self.data.index + self.data.year.astype(str)\n if method in self.data.columns:\n self.data.drop(columns=method, inplace=True)\n self.data = self.data.merge(clusters, on=\"joinkey\", how=\"left\")\n self.data[\"geoid\"] = geoid\n self.data.set_index(\"geoid\", inplace=True)\n", "sub_path": "osnap/analytics/analytics.py", "file_name": "analytics.py", "file_ext": "py", "file_size_in_byte": 9585, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pandas.HDFStore", "line_number": 28, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 31, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 37, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.HDFStore", "line_number": 46, "usage_type": "call"}, {"api_name": "osmnx.project_gdf", "line_number": 69, "usage_type": "call"}, {"api_name": "osmnx.project_gdf", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 71, "usage_type": "call"}, {"api_name": "osmnx.project_gdf", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "cluster.ward", "line_number": 142, "usage_type": "name"}, {"api_name": "cluster.kmeans", "line_number": 143, "usage_type": "name"}, {"api_name": "cluster.affinity_propagation", "line_number": 144, "usage_type": "name"}, {"api_name": "cluster.gaussian_mixture", "line_number": 145, "usage_type": "name"}, {"api_name": "cluster.spectral", "line_number": 146, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 158, "usage_type": "call"}, {"api_name": "libpysal.weights.Contiguity.Queen", "line_number": 234, "usage_type": "name"}, {"api_name": "libpysal.weights.Contiguity.Rook", "line_number": 234, "usage_type": "name"}, {"api_name": "libpysal.weights.Distance.KNN.from_dataframe", "line_number": 244, "usage_type": "call"}, {"api_name": "libpysal.weights.Distance.KNN", "line_number": 244, "usage_type": "name"}, {"api_name": "cluster.spenc", "line_number": 257, 
"usage_type": "name"}, {"api_name": "cluster.ward_spatial", "line_number": 258, "usage_type": "name"}, {"api_name": "cluster.skater", "line_number": 259, "usage_type": "name"}, {"api_name": "cluster.max_p", "line_number": 260, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 266, "usage_type": "call"}, {"api_name": "libpysal.attach_islands", "line_number": 267, "usage_type": "call"}, {"api_name": "libpysal.attach_islands", "line_number": 270, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 282, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 287, "usage_type": "call"}]} +{"seq_id": "310999647", "text": "import singleTest as sT\nimport numpy as np\nimport time\nimport matplotlib.pyplot as pl\nfrom matplotlib import rc, rcParams\nrcParams.update({'font.size': 20})\n#set up tex interpreter\nrc('text',usetex=True)\n\nclass multiTest(object):\n\t\"\"\"\n\tdoing multi test tasks for pi estimate\n\t\"\"\"\n\n\tdef __init__(self, Nlist):\n\t\tself.Nlist = np.array(Nlist)\n\t\ttimelist = []\n\t\tpilist = []\n\n\t\tfor N in self.Nlist:\n\t\t\tnewtest = sT.singleTest(N)\n\t\t\tnew_pi = newtest.estimate()\n\t\t\tnew_t = newtest.time_estimate()\n\t\t\ttimelist += [new_t]\n\t\t\tpilist += [new_pi]\n\n\t\tself.tlist = np.array(timelist)\n\t\tself.pilist = np.array(pilist)\n\n\tdef plottime_N(self):\n\n\t\tpl.figure()\n\t\tpl.clf()\n\t\tpl.hold(True)\n\t\tpl.plot(self.Nlist, self.tlist, 'r-.')\n\t\tpl.xlabel('N')\n\t\tpl.ylabel('t (s)')\n\t\tpl.xscale('log')\n\t\tpl.show()\n\n\tdef plotprecision_N(self):\n\n\t\tpl.figure()\n\t\tpl.clf()\n\t\tpl.hold(True)\n\t\tpl.plot(self.Nlist, self.pilist - np.pi, 'r-.')\n\t\tpl.xlabel('N')\n\t\tpl.ylabel('pi(calc) - pi')\n\t\tpl.xscale('log')\n\t\tpl.show()", "sub_path": "day2/exercises/sandy-day2/pi_estimate/multiTest.py", "file_name": "multiTest.py", "file_ext": "py", "file_size_in_byte": 976, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.rcParams.update", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 6, "usage_type": "name"}, {"api_name": "matplotlib.rc", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "singleTest.singleTest", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hold", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 38, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hold", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 46, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "84334596", "text": "\"\"\"Somnething\"\"\"\n\nimport plotly.graph_objects as go\n\n\ndef make_sankey(df, labels):\n fig = go.Figure(\n data=[\n go.Sankey(\n node=dict(\n pad=15,\n thickness=20,\n line=dict(color=\"black\", width=0.5),\n label=labels,\n ),\n link=dict(\n source=df[\n \"cms_2021_idx\"\n ], # indices correspond to labels, eg A1, A2, A1, B1, ...\n target=list(df[\"cms_now_idx\"]),\n value=list(df[\"count\"]),\n ),\n )\n ]\n )\n\n fig.update_layout(title_text=\"Basic Sankey Diagram\", font_size=10)\n return fig\n", "sub_path": "resc/resc.py", "file_name": "resc.py", "file_ext": "py", "file_size_in_byte": 750, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "plotly.graph_objects.Figure", "line_number": 7, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 7, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Sankey", "line_number": 9, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "365646945", "text": "from flask import Flask, render_template, redirect, request, url_for, flash, session\nfrom flask.ext.login import login_user, logout_user, login_required, \\\n current_user\nfrom . import admin\nfrom .. 
import db\nfrom ..models import User, Role, Institute, DigitizationProtocol\nfrom ..email import send_email\nfrom .forms import EditProfileAdminForm, EditInstituteAdminForm, EditProtocolAdminForm\nfrom ..decorators import admin_required, permission_required, crossdomain\n\n\n@admin.route('/users')\n@login_required\n@admin_required\ndef users_page():\n users = User.query.all()\n return render_template('admin/users.html', users = users)\n\n# edit a different profile as admin\n@admin.route('/edit-profile/', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_profile_admin(id):\n user = User.query.get_or_404(id)\n form = EditProfileAdminForm(user=user)\n if form.validate_on_submit():\n user.email = form.email.data\n user.username = form.username.data\n user.confirmed = form.confirmed.data\n user.role = Role.query.get(form.role.data)\n user.name = form.name.data\n user.about_me = form.about_me.data\n user.institute = form.institute.data\n user.institute_confirmed = form.institute_confirmed.data\n db.session.commit()\n \n flash('The profile has been updated.')\n return redirect(url_for('admin.users_page'))\n \n form.email.data = user.email\n form.username.data = user.username\n form.confirmed.data = user.confirmed\n form.role.data = user.role_id\n form.name.data = user.name\n form.about_me.data = user.about_me\n form.institute.data = user.institute\n form.institute_confirmed.data = user.institute_confirmed\n return render_template('admin/user_form.html', form=form, user=user)\n\n@admin.route('/institutes')\n@login_required\n@admin_required\ndef institutes_page():\n institutes = Institute.query.all()\n return render_template('admin/institutes.html', institutes = institutes)\n\n@admin.route('/edit-institute/', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_institute_admin(id):\n institute = Institute.query.get_or_404(id)\n form = EditInstituteAdminForm(institute=institute)\n if form.validate_on_submit():\n \n institute.institution_name = form.institution_name.data\n institute.institution_short = form.institution_short.data\n institute.main_contact_email = form.main_contact_email.data\n institute.main_contact_name = form.main_contact_name.data\n institute.institution_address = form.institution_address.data\n institute.head_compadrino = form.head_compadrino.data\n institute.research_group = form.research_group.data\n institute.date_joined = form.date_joined.data\n institute.department = form.department.data\n institute.country = form.country.data\n institute.website = form.website.data\n db.session.commit()\n\n flash('The institute profile has been updated.')\n return redirect(url_for('admin.institutes_page'))\n \n form.institution_name.data = institute.institution_name\n form.institution_short.data = institute.institution_short\n form.main_contact_email.data = institute.main_contact_email \n form.main_contact_name.data = institute.main_contact_name\n form.institution_address.data = institute.institution_address\n form.head_compadrino.data = institute.head_compadrino\n form.research_group.data = institute.research_group\n form.date_joined.data = institute.date_joined \n form.department.data = institute.department\n form.country.data = institute.country \n form.website.data = institute.website \n\n return render_template('admin/institute_form.html', form=form, institute=institute)\n\n@admin.route('/new-institute', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef new_institute_admin():\n institute = Institute()\n form = 
EditInstituteAdminForm(institute=institute)\n flash('Date format must be \"YYYY-mm-dd\".')\n if form.validate_on_submit():\n \n institute.institution_name = form.institution_name.data\n institute.institution_short = form.institution_short.data\n institute.main_contact_email = form.main_contact_email.data\n institute.main_contact_name = form.main_contact_name.data\n institute.institution_address = form.institution_address.data\n institute.research_group = form.research_group.data\n institute.head_compadrino = form.head_compadrino.data\n institute.date_joined = form.date_joined.data\n institute.department = form.department.data\n institute.country = form.country.data\n institute.website = form.website.data\n \n db.session.add(institute)\n db.session.commit()\n\n flash('The institute has been added')\n return redirect(url_for('admin.institutes_page'))\n\n return render_template('admin/institute_form.html', form=form)\n\n@admin.route('/new_protocol', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef new_protocol_admin():\n protocol = DigitizationProtocol()\n form = EditProtocolAdminForm(protocol=protocol)\n if form.validate_on_submit():\n \n protocol.field_name = form.field_name.data\n protocol.name_in_csv = form.name_in_csv.data\n protocol.database_model = form.database_model.data\n protocol.field_description = form.field_description.data\n protocol.field_short_description = form.field_short_description.data\n\n \n db.session.add(protocol)\n db.session.commit()\n\n flash('The protocol entry has been added')\n return redirect(url_for('main.protocol_page'))\n\n return render_template('admin/protocol_form.html', form=form)\n\n@admin.route('/edit_protocol/', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_protocol_admin(id):\n protocol = DigitizationProtocol.query.get_or_404(id)\n form = EditProtocolAdminForm(protocol=protocol)\n if form.validate_on_submit():\n \n protocol.field_name = form.field_name.data\n protocol.name_in_csv = form.name_in_csv.data\n protocol.database_model = form.database_model.data\n protocol.field_description = form.field_description.data\n protocol.field_short_description = form.field_short_description.data\n\n db.session.add(protocol)\n db.session.commit()\n\n flash('The protocol entry has been edited')\n return redirect(url_for('main.protocol_page'))\n \n form.field_name.data = protocol.field_name \n form.name_in_csv.data = protocol.name_in_csv\n form.database_model.data = protocol.database_model\n form.field_description.data = protocol.field_description\n form.field_short_description.data = protocol.field_short_description\n\n return render_template('admin/protocol_form.html', form=form)\n\n", "sub_path": "app/admin/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6885, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "models.User.query.all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 13, "usage_type": "name"}, {"api_name": "decorators.admin_required", "line_number": 14, "usage_type": "name"}, {"api_name": "models.User.query.get_or_404", "line_number": 24, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 24, "usage_type": 
"attribute"}, {"api_name": "models.User", "line_number": 24, "usage_type": "name"}, {"api_name": "forms.EditProfileAdminForm", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Role.query.get", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Role.query", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 21, "usage_type": "name"}, {"api_name": "decorators.admin_required", "line_number": 22, "usage_type": "name"}, {"api_name": "models.Institute.query.all", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Institute.query", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.Institute", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 51, "usage_type": "name"}, {"api_name": "decorators.admin_required", "line_number": 52, "usage_type": "name"}, {"api_name": "models.Institute.query.get_or_404", "line_number": 61, "usage_type": "call"}, {"api_name": "models.Institute.query", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.Institute", "line_number": 61, "usage_type": "name"}, {"api_name": "forms.EditInstituteAdminForm", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 58, "usage_type": "name"}, {"api_name": "decorators.admin_required", "line_number": 59, "usage_type": "name"}, {"api_name": "models.Institute", "line_number": 99, "usage_type": "call"}, {"api_name": "forms.EditInstituteAdminForm", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 96, "usage_type": "name"}, {"api_name": "decorators.admin_required", "line_number": 97, "usage_type": "name"}, {"api_name": "models.DigitizationProtocol", "line_number": 128, "usage_type": "call"}, {"api_name": "forms.EditProtocolAdminForm", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 143, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 143, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 125, "usage_type": "name"}, {"api_name": "decorators.admin_required", "line_number": 126, "usage_type": "name"}, {"api_name": 
"models.DigitizationProtocol.query.get_or_404", "line_number": 151, "usage_type": "call"}, {"api_name": "models.DigitizationProtocol.query", "line_number": 151, "usage_type": "attribute"}, {"api_name": "models.DigitizationProtocol", "line_number": 151, "usage_type": "name"}, {"api_name": "forms.EditProtocolAdminForm", "line_number": 152, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 164, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 173, "usage_type": "call"}, {"api_name": "flask.ext.login.login_required", "line_number": 148, "usage_type": "name"}, {"api_name": "decorators.admin_required", "line_number": 149, "usage_type": "name"}]} +{"seq_id": "641168090", "text": "import xlsxwriter\nfrom Request import Request\n\n\nclass XLSxExporter:\n def __init__(self):\n self.request = Request()\n self.workbook = xlsxwriter.Workbook('MeteoInfoTableXLSX.xlsx')\n self.worksheet = self.workbook.add_worksheet()\n self.row = 0\n self.col = 0\n\n def export_to_xlsx(self):\n list_for_values = []\n\n rows = self.request.table.find_all('tr', attrs={'height': '30'})\n\n for row in rows:\n list_for_values.clear()\n cells = row.find_all('td')\n\n for cell in cells:\n list_for_values.append(cell.text)\n\n if len(list_for_values) < 19:\n for i in range(2, 19):\n list_for_values.append('')\n\n # for station, time, temperatureOfAir, airsTempChangeInOneHour, humidity, dewPoint, precipitation, intensity, visibility, trackTemp, tracksTempChangesInOneHour, tracksCondition, routeWarning, freezingPoint, trackTemp2, tracksTemp2ChangesInOneHour, tracksCondition2, routeWarning2, freezingPoint2 in list_for_values:\n # self.worksheet.write(self.row, self.col)\n\n for x in range(19):\n self.worksheet.write(self.row, self.col + x, list_for_values[x])\n self.row += 1\n\n self.worksheet.set_column(0, 18, 23)\n self.workbook.close()\n", "sub_path": "XLSxExporter.py", "file_name": "XLSxExporter.py", "file_ext": "py", "file_size_in_byte": 1343, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "Request.Request", "line_number": 7, "usage_type": "call"}, {"api_name": "xlsxwriter.Workbook", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "213935112", "text": "# -*- coding: utf-8 -*-\n#\nimport argparse\n\nfrom .__about__ import __version__\nfrom .main import start_server\n\n\ndef main(argv=None):\n parser = _get_parser()\n args = parser.parse_args(argv)\n\n start_server(args.infile, args.browser)\n return\n\n\ndef _get_parser():\n \"\"\"Parse input options.\"\"\"\n parser = argparse.ArgumentParser(description=(\"Visualize Python profile.\"))\n\n parser.add_argument(\"infile\", type=str, help=\"input runtime or import profile file\")\n\n parser.add_argument(\n \"--no-browser\",\n default=True,\n dest=\"browser\",\n action=\"store_false\",\n help=\"Don't start a web browser (default: do start)\",\n )\n\n parser.add_argument(\n \"--version\",\n \"-v\",\n action=\"version\",\n version=\"%(prog)s \" + (\"(version {})\".format(__version__)),\n )\n return parser\n", "sub_path": "tuna/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "main.start_server", "line_number": 13, "usage_type": "call"}, {"api_name": 
"argparse.ArgumentParser", "line_number": 19, "usage_type": "call"}, {"api_name": "__about__.__version__", "line_number": 35, "usage_type": "argument"}]} +{"seq_id": "603398162", "text": "from lib.note_parser import NoteParser, NoteError\nfrom lib.note_types import NoteTypesError\nfrom lib.roman_numeral_converter import roman_numeral_to_number\n\nOUTPUT_ERROR = 'I have no idea what you are talking about'\n\n\nclass CommandError(Exception):\n pass\n\n\nclass CommandParser(object):\n\n def __init__(self, notes):\n self.notes = notes\n self.note_types_map = [\n {'valid_note_types': ('intergalactic_number', 'roman_numeral'), 'command': 'record_intergalactic_number'},\n {'valid_note_types': ('commodity_value', 'intergalactic_units'), 'command': 'record_intergalactic_units'},\n {'valid_note_types': ('q_how_much', 'known_intergalactic_number'), 'command': 'convert_intergalactic_number'},\n {'valid_note_types': ('q_how_many', 'commodity_value'), 'command': 'calculate_intergalactic_units'}\n ]\n self.known_intergalactic_numbers = {}\n self.known_commodities = {}\n\n def run_notes(self):\n outputs = []\n for note in self.notes:\n try:\n note_parser = NoteParser(note, self.known_intergalactic_numbers)\n note_section_types, note_sections = note_parser.get_note_section_type()\n note_command = self.get_command_for_note_section_types(note_section_types)\n result = self.run_note_type_command(note_command, note_sections)\n if result:\n outputs.append(result)\n except CommandError:\n outputs.append(OUTPUT_ERROR)\n except NoteError:\n outputs.append(OUTPUT_ERROR)\n except NoteTypesError:\n outputs.append(OUTPUT_ERROR)\n\n return outputs\n\n def get_command_for_note_section_types(self, note_section_types):\n # Get the mapped command for the given note section types\n for note_types in self.note_types_map:\n if note_types['valid_note_types'] == note_section_types:\n return note_types['command']\n\n error_message = 'No valid note types found for note_section_types: {0}'.format(note_section_types)\n raise CommandError(error_message)\n\n def run_note_type_command(self, note_type_command, note_sections):\n # Run command given mapped command of note types\n return getattr(self, note_type_command)(note_sections)\n\n def record_intergalactic_number(self, note_sections):\n # Record the the intergalactic number for given roman numeral\n intergalactic_number = note_sections[0]\n roman_numeral = note_sections[1]\n self.known_intergalactic_numbers[intergalactic_number] = roman_numeral\n\n def record_intergalactic_units(self, note_sections):\n # Record the the intergalactic unit for given commodity\n commodity_value = note_sections[0].split()\n intergalactic_unit = note_sections[1].split()\n intergalactic_unit = float(intergalactic_unit[0])\n roman_numerals = ''.join([self.known_intergalactic_numbers[value] for value in commodity_value[:-1]])\n number = roman_numeral_to_number(roman_numerals)\n commodity = commodity_value[-1:][0]\n commodity_price = intergalactic_unit / number\n self.known_commodities[commodity] = commodity_price\n\n def convert_intergalactic_number(self, note_sections):\n # Convert intergalactic numbers and their roman numeral values into integers\n intergalactic_number = note_sections[1].split()\n roman_numerals = ''.join([self.known_intergalactic_numbers[value] for value in intergalactic_number])\n number = roman_numeral_to_number(roman_numerals)\n return '{0} is {1}'.format(note_sections[1], number)\n\n def calculate_intergalactic_units(self, note_sections):\n # Calculate the intergalactic unit price 
for given commodity\n commodity_value = note_sections[1].split()\n roman_numerals = ''.join([self.known_intergalactic_numbers[value] for value in commodity_value[:-1]])\n number = roman_numeral_to_number(roman_numerals)\n commodity = commodity_value[-1:][0]\n if commodity in self.known_commodities:\n commodity_price = self.known_commodities.get(commodity)\n intergalactic_units = number * commodity_price\n return '{0} is {1} Credits'.format(note_sections[1], int(intergalactic_units))\n else:\n error_message = 'Unknown intergalactic_units for commodity: {0}'.format(commodity)\n raise CommandError(error_message)\n", "sub_path": "lib/command_parser.py", "file_name": "command_parser.py", "file_ext": "py", "file_size_in_byte": 4498, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "lib.note_parser.NoteParser", "line_number": 29, "usage_type": "call"}, {"api_name": "lib.note_parser.NoteError", "line_number": 37, "usage_type": "name"}, {"api_name": "lib.note_types.NoteTypesError", "line_number": 39, "usage_type": "name"}, {"api_name": "lib.roman_numeral_converter.roman_numeral_to_number", "line_number": 69, "usage_type": "call"}, {"api_name": "lib.roman_numeral_converter.roman_numeral_to_number", "line_number": 78, "usage_type": "call"}, {"api_name": "lib.roman_numeral_converter.roman_numeral_to_number", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "572789409", "text": "#!/usr/bin/python\n# -*- coding=utf-8 -*-\n\n# Project: catchcore\n# Class: Hierten\n# Hierarchical dense sub-tensor detection class\n#\n# hierten.py\n# Version: 1.0\n# Goal: Class script\n# Created by @wenchieh on <11/18/2018>\n#\n# Copyright:\n# This software is free of charge under research purposes.\n# For commercial purposes, please contact the author.\n#\n# Created by @wenchieh on <11/18/2018>\n#\n\n\n__author__ = 'wenchieh'\n\n# sys\nfrom copy import *\nimport collections\n\n# third-party lib\nimport numpy as np\nimport numpy.linalg as nplg\n\n\nclass HierTen(object):\n # input para.\n T = None # the input tensor\n ndim = None # the dimension of input tensor\n shape = None # input tensor shape\n # alg. para.\n ps = 0.0 # penalty para. for missing entities p > 0\n hs = -1 # the number of hierarchies (layers)\n beta = 0 # constraints para. 
(regularization term coef.)\n etas = None # the density constraints between layers (gradually increasing)\n # res para.\n valid_hs = -1\n hsindicator = None\n hsdensity = None\n hnnzs = None\n\n def __init__(self, tensor):\n if len(tensor.vals) <= 0:\n ValueError(\"the input tensor is ZERO!\")\n\n self.T = tensor\n self.shape = tensor.shape\n self.ndim = tensor.ndim\n\n def setting(self, hierarchies=None, penalties=None, constraintpara=None, etas=None):\n self.hs = hierarchies if hierarchies is not None else 10\n self.beta = constraintpara if constraintpara is not None else 1.8\n self.valid_hs = self.hs\n\n if penalties is None:\n self.ps = [1e-3] * self.hs\n elif not isinstance(penalties, collections.Iterable):\n self.ps = [penalties] * self.hs\n elif isinstance(penalties, collections.Iterable):\n if len(penalties) == self.hs:\n self.ps = penalties\n elif len(penalties) == 1:\n self.ps = [penalties[0]] * self.hs\n\n if etas is None:\n halfhs = int(np.round(self.hs / 2.0))\n self.etas = np.asarray([1.5] * halfhs + [1.1] * (self.hs - halfhs))\n elif not isinstance(etas, collections.Iterable):\n self.etas = np.asarray([etas] * self.hs)\n elif isinstance(etas, collections.Iterable):\n if len(etas) == self.hs:\n self.etas = np.asarray(etas, float)\n elif len(etas) == 1:\n self.etas = np.asarray([etas[0]] * self.hs)\n else:\n ValueError(\n \"input parameter etas is invalid, it must be a float or a list with size as hierarchies!\")\n return\n\n def _density_(self, xs):\n density = self.T.ttv(tuple(xs), modes=range(self.ndim))\n density /= np.prod([np.sum(x) * 1.0 for x in xs])\n return density\n\n def _subtensor_nnzs_(self, indicators):\n xs, shape = list(), list()\n for dm in range(self.ndim):\n if len(indicators[dm]) > 0:\n xdm = np.zeros((self.shape[dm]))\n xdm[indicators[dm]] = 1\n else:\n xdm = np.ones((self.shape[dm]))\n xs.append(xdm)\n shape.append(np.sum(xdm, dtype=int))\n nnzs = self.T.ttv(tuple(xs), modes=range(self.ndim))\n\n return int(nnzs), tuple(shape)\n\n def _OBJhierdense_(self, xs, h, Ch):\n '''\n the objective function for measuring the density of sub-tensor induced by indicators [xs]\n :param xs: indicators for each dimension\n :param h: h-th hierarchy of dense tensor, h >= 0\n :param Ch: the density of h-th hierarchy\n :return: the density\n '''\n objval = 0.0\n ttvxs = self.T.ttv(tuple(xs), modes=range(self.ndim))\n objval += -1.0 * (1 + self.ps[h] + (h > 0) * self.beta) * ttvxs\n objval += 1.0 * (self.ps[h] + (h > 0) * Ch * self.beta) * \\\n np.prod([np.sum(x) * 1.0 for x in xs])\n return objval\n\n def _GRADhierdense_(self, xs, dim, h, Ch):\n '''\n the gradient for the objective function of hierarchical density\n :param xs: indicators for each dimension\n :param dim: the dimension for focusing\n :param h: h-th hierarchy of dense tensor, h >= 0\n :param Ch: the density of h-th hierarchy\n :return:\n '''\n xs_nh = xs[:dim] + xs[dim + 1:]\n scale = np.prod([np.sum(x) * 1.0 for x in xs_nh])\n grad_xdim = np.array([0.0] * self.shape[dim])\n ttvxs = self.T.ttv(tuple(xs_nh), modes=range(dim) + range(dim + 1, self.ndim))\n grad_xdim += -1.0 * (1 + self.ps[h] + (h > 0) * self.beta) * np.squeeze(ttvxs.toarray())\n grad_xdim += 1.0 * (self.ps[h] + (h > 0) * Ch * self.beta) * \\\n scale * np.ones((self.shape[dim]))\n return grad_xdim\n\n def _projected_grads_(self, x, gradx, lb, ub):\n # project the gradient for x to lower and upper bounds.\n grad = copy(gradx)\n lbxs = np.where(x == lb)[0]\n grad[lbxs] = np.minimum(0, grad[lbxs])\n ubxs = np.where(x == ub)[0]\n grad[ubxs] = 
np.maximum(0, grad[ubxs])\n return grad\n\n def _projected_optimize_(self, xs_old, lb, ub, dim, h, Ch, maxiter=5, tol=1e-6):\n \"\"\"\n projected gradient descent optimization algorithm\n to solve the each optimization sub-problem: the h-th hierarchy h \\in [0, self.hs-1]\n :param xs_old: the indicators vector from last update.\n :param lb: the lower bound for the indicator vector\n :param ub: the upper bound for the indicator vector\n :param dim: the optimization dimension\n :param h: the h-th hierarchy\n :param Ch: the density value for the last hierarchy\n :param maxiter: the maximum number of updating iteration\n :param tol: the tolerance value for updating (stop criteria)\n :return: the update gradient vector for x\n \"\"\"\n\n alpha = 1 # the initial step size\n sigma = 0.01 # para. for the line search to select a good step size\n ratio = 0.1 # ratio of the chane of the step size\n ubalpha = 10\n succ = 0\n\n # projected gradient descent alg.\n iter = 0\n old = copy(xs_old)\n new = None\n itercond = True\n while (iter < maxiter): # to be convergence\n if iter > 0:\n old = copy(new)\n\n objold = self._OBJhierdense_(old, h, Ch)\n grad = self._GRADhierdense_(old, dim, h, Ch)\n\n # searching for step-size that satisfies the modified Armijo condition\n while succ <= ubalpha and itercond:\n xsdm_new = np.minimum(np.maximum(old[dim] - alpha * grad, lb),\n ub) # projected to bounded feasible region\n # [usually not entered]\n # spacial case processing for xsdm_new = 0, to make sure that u_new = u - eta*grad_u >= 0\n if np.sum(np.abs(xsdm_new)) == 0:\n xsdm_gradpos = grad > 0\n alpha = np.mean(old[dim][xsdm_gradpos] / grad[xsdm_gradpos])\n xsdm_new = np.minimum(np.maximum(old[dim] - alpha * grad, lb), ub)\n\n # Armijo alg.\n succ += 1\n new = old[:dim] + [xsdm_new] + old[dim + 1:]\n objnew = self._OBJhierdense_(new, h, Ch)\n # Armijo's line search condition\n itercond = (objnew - objold > sigma * np.dot(grad, (new[dim] - old[dim])))\n if itercond:\n alpha *= ratio\n else:\n alpha /= np.sqrt(ratio)\n\n iter += 1\n if nplg.norm(new[dim] - old[dim]) < tol: # for convergence update\n break\n # the updated indicator vector for the given dimension\n return new[dim]\n\n def _optimal_hiertens(self, selects=None, dimension=None, maxiter=100, eps=1e-7, convtol=1e-6, debug=False):\n '''\n get the optimal hierarchical dense sub-tensors\n :param selects: [array | list, default=None], the selected seed for specific focus\n :param maxiter: [int, default=100], the maxiimum iterations\n :param eps: [float, default=1e-7], the tolerance threshold\n :param convtol: [float, default=1e-5], the threshold for convergence.\n :return:\n '''\n if selects is None or dimension is None:\n selects, dimension = None, None\n\n # initialization for Xs and grad-norm\n xhs = list()\n for h in range(self.hs):\n xhs.append([0.01 * np.ones((dm)) for dm in self.shape])\n for dm in range(self.ndim):\n xhs[0][dm] = 0.5 * np.ones((self.shape[dm]))\n\n # the average density for the whole tensor by selecting each element\n C0 = self.etas[0] * np.sum(self.T.vals) * 1.0 / np.prod(np.asarray(self.shape, float))\n # compute the initial gradients for each indicator vector\n grad_x0, grad_xk = list(), list()\n for dm in range(self.ndim):\n grad_x0.append(self._GRADhierdense_(xhs[0], dm, 0, 0))\n grad_xk.append(self._GRADhierdense_(xhs[1], dm, 1, C0))\n\n # initial projected gradient norm\n # lower and upper bounds for indicator vectors\n initnorm = 0.0\n lbs = xhs[1]\n if selects is not None:\n lbs[dimension][np.array(selects, dtype=int)] = 
0.9\n ubs = [np.ones((dm)) for dm in self.shape]\n xhs_gradproj = list()\n for dm in range(self.ndim):\n init_xsp = np.zeros((self.shape[dm], self.hs))\n init_xsp[:, 0] = self._projected_grads_(xhs[0][dm], grad_x0[dm], lbs[dm], ubs[dm])\n xskp = self._projected_grads_(xhs[1][dm], grad_xk[dm], lbs[dm], ubs[dm])\n init_xsp[:, 1:] = np.repeat(np.asarray([xskp]), self.hs - 1, 0).T\n xhs_gradproj.append(init_xsp)\n initnorm += nplg.norm(init_xsp[:, 0]) ** 2 + (self.hs -\n 1) * nplg.norm(init_xsp[:, 1]) ** 2\n initnorm = np.sqrt(initnorm)\n\n # iterational optimization\n # dimensional alternative projected gradient descent optimization\n iter = 0\n norm_trace = list([initnorm])\n while (iter < maxiter):\n if iter % 10 == 0:\n print(\"iteration: %d\" % iter)\n xh0_old = deepcopy(xhs[0])\n for dm in range(self.ndim):\n lbx, ubx = xhs[1][dm], np.ones((self.shape[dm],))\n xhs[0][dm] = self._projected_optimize_(xh0_old, lbx, ubx, dm, 0, 0, tol=convtol)\n grad_xsdm = self._GRADhierdense_(xhs[0], dm, 0, 0)\n xhs_gradproj[dm][:, 0] = self._projected_grads_(xhs[0][dm], grad_xsdm, lbx, ubx)\n xh0_old = deepcopy(xhs[0])\n\n # update for the each hierarchy. i.e. k \\in [1, hs)\n for h in range(1, self.hs):\n C = self.etas[h] * self._density_(xhs[h - 1])\n xhs_old = deepcopy(xhs[h])\n # solve the subproblem of xhs[h], and update the dimension alternatively.\n for dm in range(self.ndim):\n # lower bound as the current next hierarchy indicator vector, i.e. nested node constraint.\n lbs_xdm = xhs[h + 1][dm] if h < self.hs - 1 else np.zeros((self.shape[dm],))\n if selects is not None and dimension == dm:\n lbs_xdm[np.asarray(selects, int)] = 0.9\n # lower bound as the last hierarchy indicator vector.\n ubs_xdm = xhs[h - 1][dm]\n xhs[h][dm] = self._projected_optimize_(\n xhs_old, lbs_xdm, ubs_xdm, dm, h, C, tol=convtol)\n grad_xsdm_new = self._GRADhierdense_(xhs[h], dm, h, C)\n xhs_gradproj[dm][:, h] = self._projected_grads_(\n xhs[h][dm], grad_xsdm_new, lbs_xdm, ubs_xdm)\n\n iter += 1\n # early stopping (criteria)\n norm_new = 0.0\n for dm in range(self.ndim):\n norm_new += nplg.norm(xhs_gradproj[dm]) ** 2\n norm_new = np.sqrt(norm_new)\n norm_trace.append(norm_new)\n if norm_new < eps * initnorm:\n break\n\n if iter >= maxiter:\n print(\"Warning: maximum iterators in nls subproblem\")\n if debug:\n print(\"# iters: {}, norm_init: {}, norm_final: {}\".format(\n iter, initnorm, norm_trace[-1]))\n print(norm_trace)\n return xhs\n\n def hieroptimal(self, maxiter=100, eps=1e-7, convtol=1e-5, debug=False):\n '''\n hierarchical optimization for dense sub-tensor detection\n :param maxiter: the maximum number of updating iteration\n :param eps: the tolerance value for updating (stop criteria)\n :param convtol: the threshold for convergence.\n :return:\n '''\n return self._optimal_hiertens(maxiter=maxiter, eps=eps, convtol=convtol, debug=debug)\n\n def queryhiers(self, seeds, dimension, maxiter=100, eps=1e-7, convtol=1e-5):\n '''\n query specific hierarchical dense sub-densors\n :param seeds: queried seeds\n :param dimension: selected dimension\n :return:\n '''\n if seeds is None or dimension is None:\n seeds, dimension = None, None\n ValueError(\"Neither of the dimension and seeds can be [None].\")\n if dimension not in range(self.ndim):\n ValueError(\"selected dimension must be in [0, %d).\" % (self.ndim))\n return self._optimal_hiertens(seeds, dimension, maxiter, eps, convtol)\n\n def hierarchy_indicator(self, optxs, tholds=0.5):\n nhier = len(optxs)\n thetas = list()\n validhs = list()\n indicators = list()\n hnnzs, 
hshapes, hdensities = list(), list(), list()\n\n if isinstance(tholds, collections.Iterable):\n if len(tholds) < self.ndim:\n thetas = [tholds[0]] * self.ndim\n else:\n thetas = tholds\n elif tholds is None or not isinstance(tholds, collections.Iterable):\n thetas = [tholds] * self.ndim\n\n for h in range(nhier):\n print(\"H{}:\".format(h + 1))\n print(\"{}\".format([(np.min(optxs[h][dm]), np.max(optxs[h][dm]))\n for dm in range(self.ndim)]))\n hidxs, hshp = list(), list()\n isdiff = 0\n for dm in range(self.ndim):\n dmidx = np.where(optxs[h][dm] > thetas[dm])[0]\n hidxs.append(sorted(dmidx))\n hshp.append(len(dmidx))\n if h > 0 and len(indicators) > 0:\n isdiff += int(set(indicators[-1][dm]) != set(dmidx))\n cards = np.prod(np.asarray(hshp, float))\n if cards > 0 and (h == 0 or (h > 0 and isdiff > 0)):\n nnz, _ = self._subtensor_nnzs_(hidxs)\n if h == 0 or (h > 0 and nnz != hnnzs[-1]):\n validhs.append(h)\n indicators.append(hidxs)\n hnnzs.append(nnz)\n hshapes.append(tuple(hshp))\n hdensities.append(nnz * 1.0 / cards)\n\n self.valid_hs = len(validhs)\n self.hsindicator = indicators\n\n return validhs, indicators, hnnzs, hshapes, hdensities\n\n def dump(self):\n print(\"Basic Information:\")\n print(\" n_dims: {}, shape: {}, nnzs: {}, totals: {}\".format(self.ndim, self.shape,\n len(self.T.vals), np.sum(self.T.vals)))\n hiershape_str = \"\"\n for hs in range(self.valid_hs):\n hshp_str = \"(\" + \",\".join(map(str, [len(self.hsindicator[hs][dm])\n for dm in range(self.ndim)])) + \")\"\n hiershape_str += hshp_str + ', '\n print(\" Valid hierarchies:{}, shapes:{}\".format(self.valid_hs, hiershape_str[:-3]))\n\n if self.hsdensity is not None:\n print(\n \"Hierarchical densities and none-zeros (average density: {}):\".format(self.hsdensity[0]))\n print(\" \\t{}\".format(self.hsdensity[1:]))\n print(\" \\t{}\".format(self.hnnzs))\n print(\"done!\")\n", "sub_path": "src/hierten.py", "file_name": "hierten.py", "file_ext": "py", "file_size_in_byte": 16257, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "collections.Iterable", "line_number": 63, "usage_type": "attribute"}, {"api_name": "collections.Iterable", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 73, "usage_type": "call"}, {"api_name": "collections.Iterable", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 75, "usage_type": "call"}, {"api_name": "collections.Iterable", "line_number": 76, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 133, "usage_type": "call"}, 
{"api_name": "numpy.ones", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 203, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 249, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 250, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 290, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 291, "usage_type": "call"}, {"api_name": "collections.Iterable", "line_number": 335, "usage_type": "attribute"}, {"api_name": "collections.Iterable", "line_number": 340, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 350, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 373, "usage_type": "call"}]} +{"seq_id": "416647289", "text": "from django.shortcuts import render, redirect\nimport sys\nsys.path.append('../')\nfrom game_engine.card import Card\nfrom game_engine.trick import Trick\n# from agents.rule_based_agent import RuleBasedAgent\nfrom game_engine.round import Round\nfrom game_engine.player 
import Player\nfrom agents.tf_agents.tf_agents_ppo_agent import TFAgentsPPOAgent\nfrom agents.featurizers import OriginalFeaturizer\nimport tensorflow as tf\ntf.compat.v1.enable_v2_behavior()\n\n\nclass TrickManager(Trick):\n\n def __init__(self, trump_card, players, first_player, played_cards_in_round):\n super().__init__(trump_card, players, first_player, played_cards_in_round)\n self.current_winner = self.first_player\n self.old_card = Card(\"White\", 0)\n self.new_card = Card(\"White\", 0)\n self.trick_cards = dict()\n self.first_card = Card(\"White\", 0)\n\n\nplayers = [TFAgentsPPOAgent(featurizer=OriginalFeaturizer()) for _ in range(3)] + [Player()]\n# players = [RuleBasedAgent(), RuleBasedAgent(), RuleBasedAgent(), Player()]\ngame_round = Round(round_num=1, players=players)\ntrick = TrickManager(Card('White', 0), None, 0, [None])\nblind = True\n\n\ndef home(request):\n\n return render(request, 'start.html')\n\n\ndef play_game(request):\n for player in players:\n if player.__class__.__name__ == \"TFAgentsPPOAgent\":\n player.__init__(keep_models_fixed=True, featurizer=OriginalFeaturizer())\n else:\n player.__init__()\n return redirect('play_round', game_round_no=1)\n\n\ndef play_round(request, game_round_no):\n game_round.__init__(round_num=game_round_no, players=players)\n for player in players:\n player.wins = 0\n player.prediction = -1\n game_round.played_cards = dict()\n for index, player in enumerate(game_round.players):\n game_round.played_cards[index] = []\n return redirect('get_prediction', game_round_no=game_round_no)\n\n\ndef get_prediction(request, game_round_no):\n print(\"Start of Round: \" + str(game_round_no))\n for player in players:\n player.hand = game_round.deck.draw(game_round_no)\n if game_round.deck.is_empty():\n game_round.trump_card = Card(\"White\", 0)\n print(\"Deck is empty. 
No trump color\")\n else:\n game_round.trump_card = game_round.deck.draw()[0]\n while game_round.trump_card.color == \"White\" and not game_round.deck.is_empty():\n game_round.trump_card = game_round.deck.draw()[0]\n # game_round.trump_card.value = 0\n # if game_round.first_player != 3:\n # print(\"Robot choosing suit\")\n # game_round.trump_card.color = players[game_round.first_player].get_trump_color()\n # else:\n # print(\"User choosing suit\")\n # #TODO integrate the user choosing the trump color\n # game_round.trump_card.color = players[3].hand[0].color\n # else:\n # game_round.played_cards.update({5: game_round.trump_card})\n for player in players:\n print(str(player.hand))\n if game_round_no > 10:\n width = len(players[1].hand) * 55\n height = len(players[2].hand) * 33\n else:\n width = len(players[1].hand) * 66\n height = len(players[2].hand) * 50\n print(\"Trump Color: \" + str(game_round.trump_card.color))\n return render(request, 'game.html', {'left_agent': players[0], 'top_agent': players[1],\n 'right_agent': players[2], 'human_player': players[3],\n 'prediction_phase': True, 'round': game_round_no, 'width': width,\n 'height': height, 'trump_card': game_round.trump_card,\n 'prediction_range': range(0, game_round_no+1), 'blind': blind,\n 'first_player': game_round.first_player + 1})\n\n\ndef receive_prediction(request, game_round_no, prediction):\n for player in players:\n if player.__class__.__name__ != \"Player\":\n player.prediction = int(player.get_prediction(game_round.trump_card, len(players)))\n else:\n player.prediction = prediction\n return redirect('get_play', game_round_no=game_round_no)\n\n\ndef get_play(request, game_round_no):\n print(\"Start of Trick: \" + str(game_round_no - len(players[0].hand)))\n if game_round_no - len(players[0].hand) == 0:\n last_winner = game_round.first_player\n else:\n last_winner = trick.current_winner\n trick.__init__(game_round.trump_card, players, last_winner, game_round.played_cards)\n trick.trick_cards = dict()\n # To convert for the viewer\n trick_cards = []\n for index, player in enumerate(game_round.players):\n trick.trick_cards[index] = None\n print(\"First Player: \" + str(trick.first_player))\n if len(players[0].hand) == game_round_no: # Starting a new round\n trick.first_player = game_round.first_player\n player_index = trick.first_player\n while player_index != 3:\n print(\"Playable Cards for \" + str(player_index) + \":\")\n if player_index == trick.first_player: # First player\n print(players[player_index].get_playable_cards(Card('White', 0)))\n trick.first_card = (players[player_index].play_card(game_round.trump_card, None, trick.trick_cards, players,\n game_round.played_cards, game_round.first_player))\n trick.old_card = trick.first_card\n game_round.played_cards[player_index].append(trick.old_card)\n trick.trick_cards[player_index] = trick.old_card\n else:\n print(players[player_index].get_playable_cards(trick.first_card))\n trick.new_card = (players[player_index].play_card(game_round.trump_card, trick.first_card, trick.trick_cards, players,\n game_round.played_cards, game_round.first_player))\n if trick.first_card.color == \"White\":\n trick.first_card = trick.new_card\n trick.trick_cards[player_index] = trick.new_card\n if trick.is_new_winner(trick.new_card, trick.old_card, game_round.trump_card, trick.first_card):\n trick.current_winner = player_index\n trick.old_card = trick.new_card\n game_round.played_cards[player_index].append(trick.old_card)\n trick_cards.append(trick.trick_cards[player_index])\n player_index = 
(player_index + 1) % len(players)\n print(\"Current Winner: \" + str(trick.current_winner))\n if game_round_no > 10:\n width = len(players[1].hand) * 55\n height = len(players[2].hand) * 33\n else:\n width = len(players[1].hand) * 66\n height = len(players[2].hand) * 50\n return render(request, 'game.html', {'left_agent': players[0], 'top_agent': players[1],\n 'right_agent': players[2], 'human_player': players[3],\n 'prediction_phase': False, 'round': game_round_no, 'width': width,\n 'height': height, 'trump_card': game_round.trump_card,\n 'trick_cards': trick_cards, 'blind': blind})\n\n\ndef receive_play(request, game_round_no, trick_card):\n player_index = 3\n player_card = Card.int_to_card(trick_card)\n print(player_card.__str__())\n valid = False\n print(\"Playable Cards:\")\n for card in players[player_index].get_playable_cards(trick.first_card):\n print(card)\n if player_card.__str__() == card.__str__():\n valid = True\n break\n if not valid:\n print(\"Incorrect Action\")\n print(players[player_index].get_playable_cards(trick.first_card))\n if game_round_no > 10:\n width = len(players[2].hand) * 55\n height = len(players[2].hand) * 33\n else:\n width = len(players[2].hand) * 66\n height = len(players[2].hand) * 50\n trick_cards = []\n for index in range(len(players)):\n if index >= trick.first_player:\n if trick.trick_cards[index] is not None:\n trick_cards.append(trick.trick_cards[index])\n return render(request, 'game.html', {'left_agent': players[0], 'top_agent': players[1],\n 'right_agent': players[2], 'human_player': players[3],\n 'prediction_phase': False, 'round': game_round_no, 'width': width,\n 'height': height, 'trump_card': game_round.trump_card,\n 'trick_cards': trick_cards, 'blind': blind})\n if trick.first_player == 3:\n trick.first_card = player_card\n elif trick.first_card.color == \"White\":\n trick.first_card = trick.new_card\n trick.new_card = player_card\n trick.trick_cards[player_index] = trick.new_card\n game_round.played_cards[player_index].append(trick.old_card)\n if trick.is_new_winner(trick.new_card, trick.old_card, game_round.trump_card, trick.first_card):\n trick.current_winner = player_index\n trick.old_card = trick.new_card\n card_index = 0\n for card in players[player_index].hand:\n if card.__str__() == trick.new_card.__str__():\n break\n else:\n card_index += 1\n players[player_index].hand.pop(card_index)\n # Move to the next player after the human player\n player_index = 0\n print(\"First Player Index: \" + str(trick.first_player))\n # loop through the rest of the players that are after the human player but haven't played yet\n while player_index != trick.first_player:\n # print(players[player_index].hand)\n print(\"Playable Cards for \" + str(player_index) + \":\")\n print(players[player_index].get_playable_cards(trick.first_card))\n trick.new_card = (players[player_index].play_card(game_round.trump_card, trick.first_card, trick.trick_cards,\n players, game_round.played_cards, game_round.first_player))\n print(\"Chosen Card: \" + str(trick.new_card))\n if trick.first_card.color == \"White\":\n trick.first_card = trick.new_card\n trick.trick_cards[player_index] = trick.new_card\n if trick.is_new_winner(trick.new_card, trick.old_card, game_round.trump_card, trick.first_card):\n trick.current_winner = player_index\n trick.old_card = trick.new_card\n game_round.played_cards[player_index].append(trick.old_card)\n player_index += 1\n print(\"Winning Player: \" + str(trick.current_winner))\n players[trick.current_winner].wins += 1\n return 
redirect('show_result', game_round_no)\n\n\ndef show_result(request, game_round_no):\n if len(players[0].hand) == 0:\n for player in players:\n if player.prediction == player.wins:\n player.score += player.prediction*10 + 20\n player.accuracy += 1\n else:\n player.score -= abs(player.prediction - player.wins)*10\n next = 'round'\n if game_round_no == 15:\n return redirect('end')\n else:\n game_round.first_player = (game_round.first_player + 1) % len(players)\n next = 'trick'\n trick_cards = []\n for index in range(len(players)):\n if trick.trick_cards[(trick.first_player + index) % 4] is not None:\n trick_cards.append(trick.trick_cards[(trick.first_player + index) % 4])\n if game_round_no > 10:\n width = len(players[1].hand) * 55\n height = len(players[2].hand) * 33\n else:\n width = len(players[1].hand) * 66\n height = len(players[2].hand) * 50\n return render(request, 'game.html', {'left_agent': players[0], 'top_agent': players[1],\n 'right_agent': players[2], 'human_player': players[3],\n 'prediction_phase': False, 'round': game_round_no, 'width': width,\n 'height': height, 'trump_card': game_round.trump_card,\n 'trick_cards': trick_cards, 'blind': blind,\n 'winner': trick.current_winner + 1, 'next': next, 'nr': game_round_no + 1})\n\n\ndef end(request):\n if game_round.round_num < 15:\n return redirect('play_game')\n for player in players:\n player.accuracy = round((player.accuracy * 100 / 15),2)\n return render(request, 'end.html', {'player_1_score': players[0].score, 'player_2_score': players[1].score,\n 'player_3_score': players[2].score, 'player_4_score': players[3].score,\n 'player_1_acc': players[0].accuracy, 'player_2_acc': players[1].accuracy,\n 'player_3_acc': players[2].accuracy, 'player_4_acc': players[3].accuracy})\n", "sub_path": "wizard_site/game/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 12979, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.enable_v2_behavior", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 12, "usage_type": "attribute"}, {"api_name": "game_engine.trick.Trick", "line_number": 15, "usage_type": "name"}, {"api_name": "game_engine.card.Card", "line_number": 20, "usage_type": "call"}, {"api_name": "game_engine.card.Card", "line_number": 21, "usage_type": "call"}, {"api_name": "game_engine.card.Card", "line_number": 23, "usage_type": "call"}, {"api_name": "agents.tf_agents.tf_agents_ppo_agent.TFAgentsPPOAgent", "line_number": 26, "usage_type": "call"}, {"api_name": "agents.featurizers.OriginalFeaturizer", "line_number": 26, "usage_type": "call"}, {"api_name": "game_engine.player.Player", "line_number": 26, "usage_type": "call"}, {"api_name": "game_engine.round.Round", "line_number": 28, "usage_type": "call"}, {"api_name": "game_engine.card.Card", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "agents.featurizers.OriginalFeaturizer", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "game_engine.card.Card", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.render", 
"line_number": 88, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 102, "usage_type": "call"}, {"api_name": "game_engine.card.Card", "line_number": 124, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 150, "usage_type": "call"}, {"api_name": "game_engine.card.Card.int_to_card", "line_number": 159, "usage_type": "call"}, {"api_name": "game_engine.card.Card", "line_number": 159, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 182, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 225, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 238, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 252, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 262, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 265, "usage_type": "call"}]} +{"seq_id": "65844478", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nOperations on OS:\r\n Read, write, remove files\r\n Get a list of files\r\n Working with directories\r\n\"\"\"\r\nimport os, shutil\r\nimport pickle as pkl\r\nimport json\r\nimport yaml\r\nimport joblib\r\n\r\n\r\ndef create_dir(dir_path, empty_if_exists=True):\r\n '''\r\n Creates a folder if not exists,\r\n If does exist, then empties it by default\r\n '''\r\n if os.path.exists(dir_path):\r\n if empty_if_exists:\r\n shutil.rmtree(dir_path)\r\n os.makedirs(dir_path)\r\n else:\r\n os.makedirs(dir_path)\r\n\r\n\r\ndef get_abs_path(src, dest, depth=2):\r\n '''\r\n Get correct absolute path by going deeper from source to find project dir\r\n And join it with dest\r\n '''\r\n project_dir = os.path.abspath(src)\r\n for i in range(depth):\r\n project_dir = os.path.dirname(project_dir)\r\n return os.path.join(project_dir, dest)\r\n\r\n\r\ndef read_txt(filepath, encoding='utf-8', sep='\\n'):\r\n '''\r\n Read txt file and return as a list of strings\r\n '''\r\n with open(filepath, 'r', encoding=encoding) as f:\r\n txt = f.read().strip(sep).split(sep)\r\n return txt\r\n\r\n\r\ndef write_txt(filepath, data, encoding='utf-8', sep='\\n'):\r\n '''\r\n Write a list of objects into a txt file\r\n '''\r\n with open(filepath, 'w', encoding=encoding) as f:\r\n f.writelines(sep.join(data))\r\n\r\n\r\ndef get_class_sizes(base_dir, class_list, print_stats=True):\r\n '''\r\n Print out the length of each class from a folder\r\n And return them as a dictionary\r\n '''\r\n class_lens = {c: len(os.listdir(os.path.join(base_dir,c))) for c in class_list}\r\n if print_stats:\r\n print('Files per each class:')\r\n print(class_lens)\r\n return class_lens\r\n\r\n\r\ndef get_class_files(base_dir, class_list=None):\r\n '''\r\n Get a list of all filenames for each class\r\n And return them as dictionary\r\n If class_list not provided then use all subfolders from base_dir as classes\r\n '''\r\n if class_list is None:\r\n class_list = os.listdir(base_dir)\r\n class_files = {}\r\n for c in class_list:\r\n class_dir = os.path.join(base_dir, c)\r\n class_files[c] = [os.path.join(base_dir,c,f) for f in os.listdir(class_dir)]\r\n return class_files\r\n\r\n\r\ndef pickle_data(filename, value=None, mode='w'):\r\n '''\r\n Save a value into filename as pickle if mode == 'w'\r\n Else if mode == 'r' then reads a value from filename\r\n '''\r\n if mode == 'w':\r\n assert value is not None, 'Do not overwrite filename with None'\r\n with open(filename, 'wb') as f:\r\n pkl.dump(value, f)\r\n return None\r\n elif mode == 
'r':\r\n with open(filename, 'rb') as f:\r\n unpickled = pkl.load(f)\r\n return unpickled\r\n else:\r\n raise Exception('mode should be in (\"w\",\"r\")')\r\n\r\n\r\ndef load_json(filepath, **kwargs):\r\n '''\r\n Load a json file and return it\r\n '''\r\n assert os.path.exists(filepath)\r\n with open(filepath, 'r') as f:\r\n data = json.load(f, **kwargs)\r\n return data\r\n\r\n\r\ndef write_json(data, filepath, **kwargs):\r\n '''\r\n Write data into a json file\r\n '''\r\n with open(filepath, 'w') as f:\r\n json.dump(data, f, **kwargs)\r\n\r\n\r\ndef load_env_vars(env_names):\r\n '''\r\n Load environment variables\r\n '''\r\n envs = {name: os.environ.get(name, None) for name in env_names}\r\n for name,val in envs.items():\r\n assert val is not None, f\"environment var ${name} is None\"\r\n return envs\r\n\r\n\r\ndef load_yaml(filepath):\r\n ''' Load yaml file '''\r\n assert os.path.exists(filepath), f\"{filepath} does not exist\"\r\n with open(filepath, 'r') as f:\r\n data = yaml.load(f, Loader=yaml.SafeLoader)\r\n return data\r\n\r\n\r\ndef save_dataframe(dataframe, filepath, silent=True):\r\n ''' Save a dataframe if filepath does not exist else append to it '''\r\n if os.path.exists(filepath):\r\n dataframe.to_csv(filepath, index=False, mode='a', header=False)\r\n else:\r\n dataframe.to_csv(filepath, index=False)\r\n if not silent:\r\n print(f'Data saved to {filepath}')\r\n\r\n\r\ndef write_joblib(data, filepath, **kwargs):\r\n '''\r\n Write data into a json file\r\n '''\r\n with open(filepath, 'wb') as f:\r\n joblib.dump(data, f, **kwargs)\r\n \r\n \r\ndef load_joblib(filepath, **kwargs):\r\n '''\r\n Load a json file and return it\r\n '''\r\n assert os.path.exists(filepath)\r\n with open(filepath, 'rb') as f:\r\n data = joblib.load(f, **kwargs)\r\n return data", "sub_path": "os.py", "file_name": "os.py", "file_ext": "py", "file_size_in_byte": 4483, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 22, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 23, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 79, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 91, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 95, "usage_type": "call"}, 
{"api_name": "os.path.exists", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 107, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 116, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 123, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 133, "usage_type": "call"}, {"api_name": "yaml.SafeLoader", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "joblib.dump", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "joblib.load", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "339135120", "text": "from django.contrib.auth.models import AnonymousUser\nfrom rest_framework.permissions import BasePermission, SAFE_METHODS\n\n\nclass IsAdminUserOrReadOnly(BasePermission):\n\n def has_permission(self, request, view):\n if request.method == 'GET':\n return True\n return request.user.is_staff or request.user.is_superuser ", "sub_path": "product/permissions.py", "file_name": "permissions.py", "file_ext": "py", "file_size_in_byte": 475, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "rest_framework.permissions.BasePermission", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "409128747", "text": "import sys\nimport json\n\nimport math\n\nif len(sys.argv) < 4:\n print('missing arguments')\n exit(1)\n\nbatch_file = sys.argv[1]\nstream_file = sys.argv[2]\noutput_file = sys.argv[3]\n\nfriends = {} # a dictionary (map), one degree friends of given id\npurchase = [] # batch and stream data record : pair(id, amount)\n\n\n# bfs finding all users given layer (degree) based on root user id\ndef bfs(user_id, layer):\n res = {user_id}\n stack = list(friends[user_id])\n while len(stack) != 0 and layer > 0:\n new_stack = set([])\n while len(stack) != 0:\n cur = stack.pop()\n if cur not in res:\n res.add(cur)\n if cur in friends.keys():\n new_stack = new_stack.union(friends[cur])\n stack = list(new_stack)\n layer = layer - 1\n return res\n\n\ndef update_new_friendship(id1, id2):\n if id1 not in friends:\n friends[id1] = {id2}\n else:\n friends[id1].add(id2)\n if id2 not in friends:\n friends[id2] = {id1}\n else:\n friends[id2].add(id1)\n\n\ndef update_unfriendship(id1, id2):\n friends[id1].remove(id2)\n friends[id2].remove(id1)\n\n\n# for batch file\ndef init_purchase(user_id, amount):\n purchase.append((user_id, amount))\n\n\ndef convert_to_json(timestamp, id, amount, mean, sd):\n return \"{\\\"event_type\\\":\\\"purchase\\\", \\\"timestamp\\\":\\\"\" + timestamp + \"\\\", \\\"id\\\": \\\"\" + \\\n id + \"\\\", \\\"amount\\\": \\\"\" + amount + \"\\\", \\\"mean\\\": \\\"\" + mean + \"\\\", \\\"sd\\\": \\\"\" + sd + \"\\\"}\";\n\n\n# for streaming data\ndef new_purchase_detect(user_id, amount, time_stamp, D, T, of):\n global friends\n global purchase\n other_ids = bfs(user_id, D)\n other_ids.remove(user_id)\n mean = 0\n temp = []\n for item in 
purchase[::-1]:\n        if len(temp) >= T:\n            break\n        if item[0] in other_ids:\n            temp.append(item[1])\n            mean = mean + item[1]\n    if len(temp) < 3:\n        purchase.append((user_id, amount))\n    else:\n        mean = mean / float(len(temp))\n        sd = sum([pow((x - mean), 2) for x in temp])\n        sd = math.sqrt(sd / float(len(temp)))\n        if sd * 3 + mean < amount:\n            print(convert_to_json(time_stamp, str(user_id), format(amount, '.2f'), format(mean, '.2f'),\n                                  format(sd, '.2f')))\n            of.writelines(convert_to_json(time_stamp, str(user_id), format(amount, '.2f'), format(mean, '.2f'),\n                                          format(sd, '.2f')) + '\\n')\n    # remove unused purchases: keep only the most recent maxCheck entries\n    maxCheck = len(friends) * T\n    if len(purchase) > maxCheck:\n        purchase = purchase[-maxCheck:]\n\n\ndef run_json_file(f, stream, D, T, of):\n    for line in f:\n        event = json.loads(line)\n        # event type as purchase\n        if event['event_type'] == 'purchase':\n            user_id = int(event['id'])\n            amount = float(event['amount'])\n            time_stamp = event['timestamp']\n            if stream:\n                new_purchase_detect(user_id, amount, time_stamp, D, T, of)\n            else:\n                init_purchase(user_id, amount)\n        # event type as befriend\n        elif event['event_type'] == 'befriend':\n            id1 = int(event['id1'])\n            id2 = int(event['id2'])\n            update_new_friendship(id1, id2)\n        # event type as unfriend\n        elif event['event_type'] == 'unfriend':\n            id1 = int(event['id1'])\n            id2 = int(event['id2'])\n            update_unfriendship(id1, id2)\n        else:\n            print('unrecognized json event type')\n            exit(1)\n\n\nbf = open(batch_file, \"r\")\nsf = open(stream_file, \"r\")\nof = open(output_file, \"a\")\n\nparams = json.loads(bf.readline())\nD = int(params['D'])\nT = int(params['T'])\nprint(D)\nprint(T)\n\n# start to run\nrun_json_file(bf, False, D, T, of)\nbf.close()\nrun_json_file(sf, True, D, T, of)\nsf.close()\nof.close()\n", "sub_path": "src/process_log.py", "file_name": "process_log.py", "file_ext": "py", "file_size_in_byte": 3850, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 80, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 94, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "330055828", "text": "########\n# Merge features columns into a matrix.\n#\n########\n\nimport sys\nimport os\n\nroot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nif root not in sys.path:\n    sys.path.append(root)\n\nimport argparse\nfrom os.path import join\nfrom sklearn.preprocessing import StandardScaler\nimport warnings\nwarnings.simplefilter(action='ignore')\n\nfrom utilities.settings import Paths\nfrom utilities.utils import get_data, check_folder, compute, get_output_parent_folder, get_path2output\nimport pandas as pd\n\npaths = Paths()\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description=\"\"\"Objective:\\nGenerate design matrices from features in a given language.\\n\\nInput:\\nLanguage and models.\"\"\")\n    parser.add_argument(\"--language\", type=str, default='english', help=\"Language of the text and the model.\")\n    parser.add_argument(\"--models\", nargs='+', action='append', default=[], help=\"Name of the models to use to generate the raw features.\")\n    parser.add_argument(\"--overwrite\", 
default=False, action='store_true', help=\"Specify whether to overwrite existing files\")\n\n    args = parser.parse_args()\n\n    features_list = []\n    models = args.models[0]\n    input_data_type = 'features'\n    output_data_type = 'design-matrices'\n    extension = '.csv'\n    source = 'fMRI'\n\n    for model_ in models:\n        features_list.append(get_data(args.language, input_data_type, model=model_, source='fMRI')) # retrieve the data to transform and append the list of runs (features data) to features_list \n\n    runs = list(zip(*features_list)) # list of 9 tuples (1 for each run), each tuple containing the features for all the specified models\n    # e.g.: [(path2run1_model1, path2run1_model2), (path2run2_model1, path2run2_model2)]\n\n    for i in range(len(runs)):\n        # create the design matrix for each run\n        model_name = '+'.join(models)\n        output_parent_folder = get_output_parent_folder(source, output_data_type, args.language, model_name)\n        check_folder(output_parent_folder) # check if the output_parent_folder exists and create it if not\n        name = os.path.basename(os.path.splitext(runs[i][0])[0])\n        run_name = name.split('_')[-1] # extract the name of the run\n        path2output = get_path2output(output_parent_folder, output_data_type, args.language, model_name, run_name, extension)\n\n        if compute(path2output, overwrite=args.overwrite):\n            merge = pd.concat([pd.read_csv(path2features, header=0) for path2features in runs[i]], axis=1) # concatenate horizontally the read csv files of a run\n            matrices = merge.values\n            scaler = StandardScaler(with_mean=True, with_std=True)\n            scaler.fit(matrices)\n            matrices = scaler.transform(matrices)\n            result = pd.DataFrame(matrices, columns=merge.columns)\n            result.to_csv(path2output, index=False)\n", "sub_path": "fMRI/design-matrices.py", "file_name": "design-matrices.py", "file_ext": "py", "file_size_in_byte": 2860, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "warnings.simplefilter", "line_number": 17, "usage_type": "call"}, {"api_name": "utilities.settings.Paths", "line_number": 23, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}, {"api_name": "utilities.utils.get_data", "line_number": 42, "usage_type": "call"}, {"api_name": "utilities.utils.get_output_parent_folder", "line_number": 50, "usage_type": "call"}, {"api_name": "utilities.utils.check_folder", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 52, "usage_type": "call"}, {"api_name": "utilities.utils.get_path2output", "line_number": 54, "usage_type": "call"}, {"api_name": "utilities.utils.compute", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 57, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "145096074", "text": "import requests\nimport os\nimport itertools\nfrom bs4 import BeautifulSoup as beSo\nfrom clint.textui import colored\nfrom codemon.CodemonMeta import get_filename\n\ndef make_structure(name, *args):\n    basedir = os.path.join(os.getcwd(), args[0]) if args else os.getcwd()\n    path = os.path.join(basedir, f'{name}') if args else f'{name}'\n    # Check if the question folder exists for the name passed if not make it.\n    if not os.path.exists(os.path.join(basedir, f'{name}')):\n        os.makedirs(path)\n\n    # Check if input file exists for the given name if not make it. \n    if not os.path.exists(os.path.join(basedir, f'{name}',f'{name}.in')):\n        open(os.path.join(path, f'{name}.in'), 'w').close()\n\n    # Check if output file exists for the given name if not make it. \n    if not os.path.exists(os.path.join(basedir, f'{name}', f'{name}.op')):\n        open(os.path.join(path, f'{name}.op'), 'w').close()\n\n\ndef fetch_tests(file_list, contestName):\n    try:\n        basedir = os.path.join(os.getcwd(), contestName) if not os.getcwd().split('/')[-1] == contestName else os.getcwd()\n        load_page = requests.get(f\"https://codeforces.com/contest/{contestName}/problems\")\n        soup = beSo(load_page.content, 'html.parser')\n        tests = soup.findAll(\"div\", attrs={\"class\":\"sample-tests\"})\n\n        if(len(tests) == 0):\n            print(colored.red(\"Wrong contest number provided\"))\n\n        else:\n            print(colored.green(\"Fetching Inputs and Outputs\"))\n\n            for file_name, test in zip(file_list, tests):\n                # Make the necessary folders and files for each source file if not present.\n                if os.getcwd().split('/')[-1] == contestName:\n                    make_structure(file_name)\n                else:\n                    make_structure(file_name, contestName)\n\n                # Add inputs to .in files\n                for t in test.findAll(\"div\", attrs={\"class\":\"input\"}):\n                    i = t.pre.text\n                    with open(os.path.join(basedir, f'{file_name}' , f'{file_name}.in'), 'a') as f:\n                        f.write(i)\n\n                # Add outputs to .op files\n                for t in test.findAll(\"div\", attrs={\"class\":\"output\"}):\n                    o = t.pre.text\n                    with open(os.path.join(basedir, f'{file_name}' , f'{file_name}.op'), 'a') as f:\n                        f.write(o)\n            print(colored.yellow(\"Inputs and Outputs added\"))\n    # In case of any error with scraping, display a warning instead of crashing.\n    except Exception:\n        print(colored.red(\"There was some error fetching the tests !!\"))\n\n\n\n\n", "sub_path": "codemon/CodemonFetch.py", "file_name": "CodemonFetch.py", "file_ext": "py", "file_size_in_byte": 2366, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 
17, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 28, "usage_type": "call"}, {"api_name": "clint.textui.colored.red", "line_number": 32, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 32, "usage_type": "name"}, {"api_name": "clint.textui.colored.green", "line_number": 35, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 35, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "clint.textui.colored.yellow", "line_number": 55, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 55, "usage_type": "name"}, {"api_name": "clint.textui.colored.red", "line_number": 58, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "479796161", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nData visualization.\n\npygal: http://www.pygal.org/en/stable/index.html\n\"\"\"\nimport unittest\nimport pygal\n\n\nclass TestDataVisualizing(unittest.TestCase):\n    def test_world_map(self):\n        worldmap_chart = pygal.maps.world.World()\n        worldmap_chart.title = 'Minimum deaths by capital punishment (source: Amnesty International)'\n        worldmap_chart.add('In 2012', {\n            'af': 14,\n            'bd': 1,\n            'by': 3,\n            'cn': 1000,\n            'gm': 9,\n            'in': 1,\n            'ir': 314,\n            'iq': 129,\n            'jp': 7,\n            'kp': 6,\n            'pk': 1,\n            'ps': 6,\n            'sa': 79,\n            'so': 6,\n            'sd': 5,\n            'tw': 6,\n            'ae': 1,\n            'us': 43,\n            'ye': 28\n        })\n        worldmap_chart.render_in_browser()\n\n\nif __name__ == '__main__':\n    unittest.main()\n", "sub_path": "data-models/python-datawrangling/src/gda/datawrangling/test_data_visualizing.py", "file_name": "test_data_visualizing.py", "file_ext": "py", "file_size_in_byte": 912, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "unittest.TestCase", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygal.maps.world.World", "line_number": 14, "usage_type": "call"}, {"api_name": "pygal.maps", "line_number": 14, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "219660072", "text": "# coding: utf-8\n# 3rd depend on\n# inner depend on\nfrom ..util import commutil, mongoutil, influxutil\nfrom . 
import alertloadutil\n# sys depend on\nfrom multiprocessing import Process\nimport sys\nimport os\n\nMONGODBNAME = 'configureDB'\nINFLUXNAME = 'LoadInfo'\n\n\ndef run(dbconn, influxconn, rabbitmquri):\n alertloadutil.set_conn_info(dbconn, rabbitmquri)\n alertloadutil.check_load(influxconn)\n\n\nif __name__ == \"__main__\":\n fname = os.path.basename(os.path.realpath(sys.argv[0]))\n logger = commutil.logfile()\n logger.info(fname + ' start running')\n\n mongouri = commutil.get_uri('mongo')\n mongoconn = mongoutil.get_mongo_client(mongouri, MONGODBNAME)\n\n influxuri = commutil.get_uri('influx')\n influxconn = influxutil.get_influx_client(influxuri, INFLUXNAME, None, None)\n\n rabbitmquri = commutil.get_uri('rabbitmq')\n\n domain = commutil.get_domain(mongouri)\n mongofilter = {\"$and\": [{\"domain\": domain},\n {\"$or\": [{\"servertype\": None},\n {\"servertype\": {\"$in\": [\"DG\"]}}\n ]\n }\n ]\n }\n dbconnlist = mongoutil.get_mongo_doc(mongoconn, 'dbconfinfo', mongofilter)\n\n for dbconn in dbconnlist:\n p = Process(target=run, args=(dbconn, influxconn, rabbitmquri))\n p.start()\n", "sub_path": "app/alertload/alertload.py", "file_name": "alertload.py", "file_ext": "py", "file_size_in_byte": 1378, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.basename", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "util.commutil.logfile", "line_number": 22, "usage_type": "call"}, {"api_name": "util.commutil", "line_number": 22, "usage_type": "name"}, {"api_name": "util.commutil.get_uri", "line_number": 25, "usage_type": "call"}, {"api_name": "util.commutil", "line_number": 25, "usage_type": "name"}, {"api_name": "util.mongoutil.get_mongo_client", "line_number": 26, "usage_type": "call"}, {"api_name": "util.mongoutil", "line_number": 26, "usage_type": "name"}, {"api_name": "util.commutil.get_uri", "line_number": 28, "usage_type": "call"}, {"api_name": "util.commutil", "line_number": 28, "usage_type": "name"}, {"api_name": "util.influxutil.get_influx_client", "line_number": 29, "usage_type": "call"}, {"api_name": "util.influxutil", "line_number": 29, "usage_type": "name"}, {"api_name": "util.commutil.get_uri", "line_number": 31, "usage_type": "call"}, {"api_name": "util.commutil", "line_number": 31, "usage_type": "name"}, {"api_name": "util.commutil.get_domain", "line_number": 33, "usage_type": "call"}, {"api_name": "util.commutil", "line_number": 33, "usage_type": "name"}, {"api_name": "util.mongoutil.get_mongo_doc", "line_number": 41, "usage_type": "call"}, {"api_name": "util.mongoutil", "line_number": 41, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "417898074", "text": "from __future__ import print_function, division\nimport os\nimport torch\nimport pandas as pd\nfrom skimage import io, transform\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\n# Ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nplt.ion() # interactive mode\n\n\n\"\"\"read values from .csv file\"\"\"\ntraining_file = pd.read_csv('../data/dog_cat/dog_cat_training.csv')\n\n#get value of (n+2)th row\nn = 11\nimg_name = 
training_file.iloc[n, 0]\nlabel = training_file.iloc[n, 1:]\nlabel = label.astype('float')\n\nprint('Image name: {}'.format(img_name))\nprint('Label shape: {}'.format(label.shape))\nprint('Label: {}'.format(label[0]))\n\n\n\n\"\"\"Prepare dataset\"\"\"\nclass DogCatDataset(Dataset):\n\n    def __init__(self, csv_file, root_dir, transform=None):\n        \"\"\"\n        Args:\n            csv_file (string): Path to the csv file with annotations.\n            root_dir (string): Directory with all the images.\n            transform (callable, optional): Optional transform to be applied\n                on a sample.\n        \"\"\"\n        self.dataset = pd.read_csv(csv_file)\n        self.root_dir = root_dir\n        self.transform = transform\n\n    def __len__(self):\n        return len(self.dataset)\n\n    # to retrieve a particular sample from the dataset\n    def __getitem__(self, idx):\n        img_name = os.path.join(self.root_dir,\n                                self.dataset.iloc[idx, 0])\n        image = io.imread(img_name)\n        label = self.dataset.iloc[idx, 1:].values\n        label = label.astype('float')\n        sample = {'name': self.dataset.iloc[idx, 0], 'image': image, 'label': label}\n\n        if self.transform:\n            sample = self.transform(sample)\n\n        return sample\n\"\"\"Test the previous Class\"\"\"\n#show the first 4 samples in the dataset\ndogCatDataset = DogCatDataset(csv_file='../data/dog_cat/dog_cat_training.csv',\n                              root_dir='../data/dog_cat/training_set/')\n\n\n\nfor i in range(len(dogCatDataset)):\n    sample = dogCatDataset[i]\n    print(i, sample['name'], sample['label'])\n    if i == 3:\n        break\n\n\"\"\"\nTransform\nRescale: to scale the image\nRandomCrop: to crop from image randomly. This is data augmentation, to enlarge your limited dataset\nToTensor: to convert the numpy images to torch images (we need to swap axes).\n\"\"\"\nclass Rescale(object):\n    \"\"\"Rescale the image in a sample to a given size.\n    Args:\n        output_size (tuple or int): Desired output size. If tuple, output is\n            matched to output_size. If int, smaller of image edges is matched\n            to output_size keeping aspect ratio the same.\n    \"\"\"\n\n    def __init__(self, output_size):\n        assert isinstance(output_size, (int, tuple))\n        self.output_size = output_size\n\n    def __call__(self, sample):\n        name, image, label = sample['name'], sample['image'], sample['label']\n\n        h, w = image.shape[:2]\n        if isinstance(self.output_size, int):\n            if h > w:\n                new_h, new_w = self.output_size * h / w, self.output_size\n            else:\n                new_h, new_w = self.output_size, self.output_size * w / h\n        else:\n            new_h, new_w = self.output_size\n\n        new_h, new_w = int(new_h), int(new_w)\n\n        img = transform.resize(image, (new_h, new_w))\n\n\n        return {'name': name, 'image': img, 'label': label}\n\n\nclass RandomCrop(object):\n    \"\"\"Crop randomly the image in a sample.\n    Args:\n        output_size (tuple or int): Desired output size. 
If int, square crop\n is made.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, sample):\n name, image, label = sample['name'], sample['image'], sample['label']\n\n h, w = image.shape[:2]\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n img = image[top: top + new_h,\n left: left + new_w]\n\n return {'name': name, 'image': img, 'label': label}\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n name, image, label = sample['name'], sample['image'], sample['label']\n\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n return {'name': name,\n 'image': torch.from_numpy(image),\n 'label': torch.from_numpy(label)}\n\n\"\"\"Compose transform\"\"\"\nscale = Rescale(256)\ncrop = RandomCrop(128)\ncomposed = transforms.Compose([Rescale(256),\n RandomCrop(224)])\n\n\n\"\"\"\nIterating through the dataset\nEvery time this dataset is sampled:\nAn image is read from the file on the fly\nTransforms are applied on the read image\nSince one of the transforms is random, data is augmentated on sampling\n\"\"\"\ntransformed_dataset = DogCatDataset(csv_file='../data/dog_cat/dog_cat_training.csv',\n root_dir='../data/dog_cat/training_set/',\n transform=transforms.Compose([\n Rescale(256),\n RandomCrop(224),\n ToTensor()\n ]))\n\nfor i in range(len(transformed_dataset)):\n sample = transformed_dataset[i]\n\n print(i, sample['name'], sample['image'].size(), sample['label'].item())\n\n if i == 3:\n break\n\n\"\"\"\nData loader\nBatching the data\nShuffling the data\nLoad the data in parallel using multiprocessing workers.\n\"\"\"\ndataloader = DataLoader(transformed_dataset, batch_size=4,\n shuffle=True, num_workers=4)\n", "sub_path": "1_1_4_input_pipeline_from_custom_dataset_dog_cat.py", "file_name": "1_1_4_input_pipeline_from_custom_dataset_dog_cat.py", "file_ext": "py", "file_size_in_byte": 6058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "warnings.filterwarnings", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 34, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 44, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "skimage.io.imread", "line_number": 55, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 55, "usage_type": "name"}, {"api_name": "skimage.transform.resize", "line_number": 109, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 109, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 137, 
"usage_type": "call"}, {"api_name": "numpy.random", "line_number": 137, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 157, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 162, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 162, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 175, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 175, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 195, "usage_type": "call"}]} +{"seq_id": "238130805", "text": "import os\nimport multiprocessing\nfrom keras.optimizers import Adam, RMSprop\nfrom keras.callbacks import CSVLogger, ModelCheckpoint, ReduceLROnPlateau, TensorBoard, EarlyStopping\nfrom voicemap.callbacks import SiameseValidator\nfrom voicemap.utils.net_utils import preprocess_instances, NShotEvaluationCallback, BatchPreProcessor\nimport voicemap.wav_models as WM\nfrom voicemap.sre_2016 import HDFDataGenerator, WavDataGenerator\nimport config as cfg\n\n# Mute excessively verbose Tensorflow output\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n##############\n# Parameters #\n##############\nn_seconds = 3\nwindow_size = 170\ndownsampling = 4\nbatchsize = 32 # 64\nfilters = 128\nembedding_dimension = 64\ndropout = 0.1\npad = True\nnum_epochs = 200\nnum_evaluation_tasks = 500\nn_shot_classification = 1\nk_way_classification = 5\n\nval_metrics = ['pooled_eer', 'accuracy', 'micro_f1']\n\n# Derived parameters\ninput_length = int(cfg.SRE_SAMPLING_RATE * n_seconds / downsampling)\nparam_str = 'siamese__filters_{}__embed_{}__drop_{}__pad={}'.format(filters, embedding_dimension, dropout, pad)\n\n###################\n# Create datasets #\n###################\n# === debug\n# train_set = 'toy_dataset'\n# val_set = 'toy_dataset'\n# data_dir = '/home/vano/wrkdir/projects_data/sre_2019/'\n# === training\ntrain_set = 'swbd_sre_small_fbank'\nval_set = 'swbd_sre_small_fbank'\ndata_dir = '/home/vano/wrkdir/projects_data/sre_2019/'\n\ntrain = WavDataGenerator(data_dir, train_set, n_seconds, stochastic=True, pad=pad)\nvalid = WavDataGenerator(data_dir, val_set, n_seconds, stochastic=False, pad=pad)\n\nbatch_preprocessor = BatchPreProcessor('siamese', preprocess_instances(downsampling))\ntrain_generator = (batch_preprocessor(batch) for batch in train.yield_verification_batches(batchsize))\nvalid_generator = (batch_preprocessor(batch) for batch in valid.yield_verification_batches(batchsize))\n\n################\n# Define model #\n################\nencoder = WM.get_baseline_convolutional_encoder(filters, embedding_dimension, dropout=dropout)\nsiamese = WM.build_siamese_net(encoder, input_shape=(input_length, 1), distance_metric='uniform_euclidean')\nopt = Adam(clipnorm=1.)\n# opt = RMSprop()\nsiamese.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\nsiamese.summary()\n\n#################\n# Training Loop #\n#################\ncallbacks = [\n # First generate custom n-shot classification metric\n # NShotEvaluationCallback(\n # num_evaluation_tasks, n_shot_classification, k_way_classification, valid),\n SiameseValidator(batch_gen=valid,\n num_tasks=num_evaluation_tasks,\n n_shot=1,\n k_way=2, # number of speakers sampled\n metrics=val_metrics,\n monitor='pooled_eer',\n mode='min',\n preprocessor=batch_preprocessor),\n # Then log and checkpoint\n CSVLogger(os.path.join(cfg.PATH, 
'logs/{}.csv'.format(param_str))),\n ModelCheckpoint(\n os.path.join(cfg.PATH, 'models/{}.hdf5'.format(param_str)),\n monitor='pooled_eer',\n mode='min',\n save_best_only=True,\n verbose=True),\n ReduceLROnPlateau(\n monitor='pooled_eer',\n mode='min',\n verbose=1),\n EarlyStopping(\n monitor='pooled_eer',\n patience=15,\n verbose=1,\n mode='min',\n min_delta=0.001),\n TensorBoard(\n log_dir=os.path.join(cfg.PATH, 'logs'),\n write_graph=True)\n]\n\nsiamese.fit_generator(\n generator=train_generator,\n steps_per_epoch=len(train) // batchsize,\n validation_data=valid_generator,\n validation_steps=100,\n epochs=num_epochs,\n workers=multiprocessing.cpu_count(),\n verbose=2,\n use_multiprocessing=True,\n callbacks=callbacks\n)\n", "sub_path": "experiments/train_wav_siamese.py", "file_name": "train_wav_siamese.py", "file_ext": "py", "file_size_in_byte": 3752, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "config.SRE_SAMPLING_RATE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "voicemap.sre_2016.WavDataGenerator", "line_number": 48, "usage_type": "call"}, {"api_name": "voicemap.sre_2016.WavDataGenerator", "line_number": 49, "usage_type": "call"}, {"api_name": "voicemap.utils.net_utils.BatchPreProcessor", "line_number": 51, "usage_type": "call"}, {"api_name": "voicemap.utils.net_utils.preprocess_instances", "line_number": 51, "usage_type": "call"}, {"api_name": "voicemap.wav_models.get_baseline_convolutional_encoder", "line_number": 58, "usage_type": "call"}, {"api_name": "voicemap.wav_models", "line_number": 58, "usage_type": "name"}, {"api_name": "voicemap.wav_models.build_siamese_net", "line_number": 59, "usage_type": "call"}, {"api_name": "voicemap.wav_models", "line_number": 59, "usage_type": "name"}, {"api_name": "keras.optimizers.Adam", "line_number": 60, "usage_type": "call"}, {"api_name": "voicemap.callbacks.SiameseValidator", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.callbacks.CSVLogger", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "config.PATH", "line_number": 81, "usage_type": "attribute"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "config.PATH", "line_number": 83, "usage_type": "attribute"}, {"api_name": "keras.callbacks.ReduceLROnPlateau", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.callbacks.TensorBoard", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "config.PATH", "line_number": 99, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "335761764", "text": "\nfrom direct.showbase.ShowBase import ShowBase\nfrom direct.actor.Actor import Actor\nfrom direct.directutil.Mopath import Mopath\nfrom direct.interval.IntervalGlobal import *\n\nclass Application(ShowBase):\n\n def __init__(self):\n\n ShowBase.__init__(self)\n\n modelPath 
= \"/e/Material/v11.egg\"\n        # modelPath4 = \"/e/Material/house4.egg\"\n        # modelPath9 = \"/e/Material/house9.egg\"\n\n        model = self.loader.loadModel(modelPath)\n        model.reparentTo(self.render)\n        model.setPos(0, 0, 0)\n        model.setTwoSided(True)\n        # model4 = self.loader.loadModel(modelPath4)\n        # model4.reparentTo(self.render)\n        # model4.setPos(30, 30, 0)\n        #\n        # model9 = self.loader.loadModel(modelPath9)\n        # model9.reparentTo(self.render)\n        # model9.setPos(-30, -30, 0)\n\n        self.cam.lookAt(0, 0, 0)\n        self.cam.setPos(0, -100, 100)\n\napp = Application()\napp.run()\n", "sub_path": "codes/DraftCode/MopathDemo.py", "file_name": "MopathDemo.py", "file_ext": "py", "file_size_in_byte": 923, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "direct.showbase.ShowBase.ShowBase", "line_number": 7, "usage_type": "name"}, {"api_name": "direct.showbase.ShowBase.ShowBase.__init__", "line_number": 11, "usage_type": "call"}, {"api_name": "direct.showbase.ShowBase.ShowBase", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "9722806", "text": "import requests\nimport queue\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\n\n\nroots = ['https://www.douban.com',\n         'https://www.bing.com',\n         'https://onedrive.live.com/?gologin=1&WT.mc_id=O16_BingHP']\n\n# a set of visited urls\nvisited = set()\n\n# a queue of urls that still need to be read\nurls_to_read = queue.Queue()\n\n\ndef handle_page(page, page_url):\n    \"\"\"\n    page here is plain text (already decoded), not bytes\n    \"\"\"\n    soup = BeautifulSoup(page, 'lxml')\n    for link in soup.find_all('a'):\n        url = link.get('href')\n        if url:\n            url = urljoin(page_url, url)\n            url = url.split('#')[0]\n            if url[0:4] == 'http':\n                if url not in visited:\n                    urls_to_read.put(url)\n\n\ndef main():\n    # init\n    for root in roots:\n        urls_to_read.put(root)\n\n    while not urls_to_read.empty():\n        url = urls_to_read.get()\n        if url not in visited:\n            # to show\n            print(url)\n\n            try:\n                response = requests.get(url)\n            except requests.exceptions.ConnectionError:\n                print(\"can't open url {}\".format(url))\n                continue\n            else:\n                response.encoding = 'utf-8'\n                handle_page(response.text, url)\n                visited.add(url)\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "crawltune.py", "file_name": "crawltune.py", "file_ext": "py", "file_size_in_byte": 1346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "queue.Queue", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 46, "usage_type": "attribute"}]} +{"seq_id": "68305595", "text": "from django.shortcuts import render\nfrom rest_framework import viewsets, permissions\nfrom .models import Language, Paradigm, Programmer\nfrom .serializers import LanguageSerializer, ProgrammerSerializer, ParadigmSerializer\n\nclass LanguageView(viewsets.ModelViewSet):\n\tqueryset = Language.objects.all()\n\tserializer_class = LanguageSerializer\n\tpermission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass ProgrammerView(viewsets.ModelViewSet):\n\tqueryset = Programmer.objects.all()\n\tserializer_class = ProgrammerSerializer\n\nclass ParadigmView(viewsets.ModelViewSet):\n\tqueryset = Paradigm.objects.all()\n\tserializer_class = ParadigmSerializer", "sub_path": 
"api_example/restapi_project/restapi_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 644, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 6, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 6, "usage_type": "name"}, {"api_name": "models.Language.objects.all", "line_number": 7, "usage_type": "call"}, {"api_name": "models.Language.objects", "line_number": 7, "usage_type": "attribute"}, {"api_name": "models.Language", "line_number": 7, "usage_type": "name"}, {"api_name": "serializers.LanguageSerializer", "line_number": 8, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticatedOrReadOnly", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 11, "usage_type": "name"}, {"api_name": "models.Programmer.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Programmer.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.Programmer", "line_number": 12, "usage_type": "name"}, {"api_name": "serializers.ProgrammerSerializer", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 15, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 15, "usage_type": "name"}, {"api_name": "models.Paradigm.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Paradigm.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Paradigm", "line_number": 16, "usage_type": "name"}, {"api_name": "serializers.ParadigmSerializer", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "513154912", "text": "from django.shortcuts import render\n\ndef welcome(request):\n\treturn render(request, 'index.html', {'msg':'Hello World!'})\n\ndef restaurant_list(request):\n\trestaurants=[\n\t\t{\n\t\t\t'name':'Bella',\n\t\t\t'type': 'Italian'\n\t\t},\n\t\t{\n\t\t\t'name':'Zara',\n\t\t\t'type':'Indian'\n\t\t},\n\t\t{\n\t\t\t'name':'Bazza',\n\t\t\t'type':'Kuwaiti'\n\t\t},\n\t]\n\tcontext = {\n\t\t'my_list': restaurants,\n\t }\n\treturn render(request, 'list.html', context)\n\n\ndef restaurant_detail(request):\n\trestaurantsdet={\n\t'name':'Bella', 'type':'Italian'\n\t}\n\n\tcontext = {'my_object': restaurantsdet,\n\n\t}\n\treturn render(request, 'detail.html', context)", "sub_path": "restaurants/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.shortcuts.render", "line_number": 4, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "572466272", "text": "# -*- coding: utf-8 -*-\n\n# Learn more: https://github.com/kennethreitz/setup.py\n\nfrom setuptools import setup, find_packages\nfrom easemlschema import __version__\n\nwith open(\"README.md\", \"r\") as fh:\n README = fh.read()\n\n# The main source of truth for install requirements of this project is the requirements.txt file.\nwith open(\"requirements.txt\", \"r\") as 
f:\n REQUIREMENTS = f.readlines()\n\nsetup(\n name='easemlschema',\n version=__version__,\n description='Schema which is used to define the type of a machine learning data set.',\n long_description=README,\n long_description_content_type=\"text/markdown\",\n author='Bojan Karlas',\n author_email='bojan.karlas@gmail.com',\n url='https://github.com/DS3Lab/easeml',\n license='MIT',\n install_requires=REQUIREMENTS,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\"\n ]\n)\n", "sub_path": "schema/python/easemlschema/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1020, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "setuptools.setup", "line_number": 15, "usage_type": "call"}, {"api_name": "easemlschema.__version__", "line_number": 17, "usage_type": "name"}, {"api_name": "setuptools.find_packages", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "423718915", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 14 22:16:03 2018\n\n@author: Rachel\n\"\"\"\n\nimport urllib.request\nfrom bson import json_util # added in 2/11\nimport json\nimport dml\nimport prov.model\nimport datetime\nimport uuid\n\nclass degreesPerNeighb(dml.Algorithm):\n contributor = 'rmak_rsc3'\n reads = ['rmak_rsc3.getUniversities', 'rmak_rsc3.getGrads', 'rmak_rsc3.getNeighborhoods']\n writes = ['rmak_rsc3.degreesPerNeighb'] #CHANGE\n\n @staticmethod\n def execute(trial = False):\n '''Retrieve some data sets (not using the API here for the sake of simplicity).'''\n startTime = datetime.datetime.now()\n \n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('rmak_rsc3', 'rmak_rsc3') \n \n neighbs = list(repo['rmak_rsc3.getNeighborhoods'].find({}))\n grads = list(repo['rmak_rsc3.getGrads'].find({}))\n unis = list(repo['rmak_rsc3.getUniversities'].find({}))\n \n \n gradCount = {}\n for n in neighbs:\n for u in unis:\n if n['properties']['Name'] in u['properties']['City']:\n for g in grads:\n if g['university_name'] == u['properties']['Name']:\n gradCount[n['properties']['Name']] = gradCount.get(n['properties']['Name'], 0) + g['grads_total']\n \n \n print(gradCount)\n# print(grads[u['properties']['Name']]['grads_total'])\n# gradCount[n['properties']['Name']] = gradCount.get(n['properties']['Name'], 0) + grads[u['properties']['Name']]['grads_total']\n \n \n \n \n \n repo.dropCollection(\"degreesPerNeighb\") \n repo.createCollection(\"degreesPerNeighb\")\n repo['rmak_rsc3.degreesPerNeighb'].insert(gradCount)\n \n repo['rmak_rsc3.degreesPerNeighb'].metadata({'complete':True})\n\n \n\n repo.logout()\n\n endTime = datetime.datetime.now()\n\n return {\"start\":startTime, \"end\":endTime}\n \n \n @staticmethod\n \n def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):\n pass\n '''\n Create the provenance document describing everything happening\n in this script. 
Each run of the script will generate a new\n        document describing that invocation event.\n        '''\n        \n        # Set up the database connection.\n#        client = dml.pymongo.MongoClient()\n#        repo = client.repo\n#        repo.authenticate('rmak_rsc3', 'rmak_rsc3')\n#        doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in <folder>#<filename> format.\n#        doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in <user>#<collection> format.\n#        doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n#        doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n#        doc.add_namespace('dm', 'http://datamechanics.io/data/rmak_rsc3/')\n#\n#        this_script = doc.agent('alg:rmak_rsc3#getGrads', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\n#\n#        resource = doc.entity('dm:Colleges_and_Universities', {'prov:label':'grads', prov.model.PROV_TYPE:'ont:DataResource', 'ont:Extension':'geojson'})\n#        getGrads = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)\n#        \n#        doc.wasAssociatedWith(getGrads, this_script)\n#\n#        doc.usage(getGrads, resource, startTime, None,\n#                  {prov.model.PROV_TYPE:'ont:Retrieval'\n#                  }\n#                  )\n#        \n#\n#        grad = doc.entity('dat:rmak_rsc3#grads', {prov.model.PROV_LABEL:'grad', prov.model.PROV_TYPE:'ont:DataSet'})\n#        doc.wasAttributedTo(grad, this_script)\n#        doc.wasGeneratedBy(grad, getGrads, endTime)\n#        doc.wasDerivedFrom(grad, resource, getGrads, getGrads, getGrads)\n        \n        \n\n#        repo.logout()\n#        \n        return doc", "sub_path": "rmak_rsc3/degreesPerNeighb.py", "file_name": "degreesPerNeighb.py", "file_ext": "py", "file_size_in_byte": 4180, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "dml.Algorithm", "line_number": 17, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "attribute"}, {"api_name": "dml.pymongo.MongoClient", "line_number": 28, "usage_type": "call"}, {"api_name": "dml.pymongo", "line_number": 28, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "attribute"}, {"api_name": "prov.model.model.ProvDocument", "line_number": 71, "usage_type": "call"}, {"api_name": "prov.model.model", "line_number": 71, "usage_type": "attribute"}, {"api_name": "prov.model", "line_number": 71, "usage_type": "name"}]} +{"seq_id": "262339645", "text": "# -*- coding: utf-8 -*-\nfrom time import sleep\nfrom urllib.parse import urlencode\nimport scrapy\nfrom goverment_news.items import GovermentNewsItem\n\nimport re\n\n\nclass JiangxiSpider(scrapy.Spider):\n    name = 'jiangxi'\n    def start_requests(self):\n        url='http://www.jiangxi.gov.cn/col/col393/index.html?'\n        for a in range(2,80):\n            p={'uid': '45663',\n               'pgeNum':a}\n            yield scrapy.Request(url=(url+urlencode(p)), callback=self.parse)\n\n    def parse(self, response):\n        title = response.xpath('/html/head/title//text()').extract()\n        content = response.css('#zoom p::text').extract()\n        source = response.xpath('//*[@id=\"zoom\"]/div/font[1]/text()[2]').extract()\n        time = re.findall(r'发布时间:(.*?)', response.text, re.M)\n\n        if len(content) != 0:\n            items = GovermentNewsItem()\n            items['title'] = title[0].replace('\u3000','').replace('\r','').replace(' ','').replace('\u3000
','').replace('\\n','')\n items['content'] = content\n items['time'] = time\n items['source'] = source\n yield items\n\n urls = re.findall(r' 0:\n # print()\n # print(agent.old_policy.evaluate(agent.to_tensor(st), torch.tensor(0).to(device)))\n # print(agent.old_policy.evaluate(agent.to_tensor(st), torch.tensor(1).to(device)))\n # print(agent.old_policy.evaluate(agent.to_tensor(st), torch.tensor(2).to(device)))\n # print(agent.old_policy.evaluate(agent.to_tensor(st), torch.tensor(3).to(device)))\n # print()\n\n for t in range(T):\n\n a, logprob = agent.get_action(st)\n avg_prob += torch.exp(logprob).detach().cpu().item()\n actual_t += 1\n\n r, done = sim[i].step(a)\n st1 = sim[i].get_state()\n\n mdp_reward += r\n final_r = r\n\n frqs[i] += st1[0, 1]\n\n if t == T - 1:\n done = True\n\n agent.push_batchdata(st, a, logprob, r, done)\n\n if done:\n break\n\n st = st1\n\n mdp_reward = sim[i].normalize_reward(mdp_reward, params['path_length'])\n tot_reward += mdp_reward\n\n agent.writer.add_scalar(\"Average taken action probability\", avg_prob/(actual_t * params['mbs']), int(epoch * params['episodes'] + e))\n agent.write_reward(tot_reward / params['mbs'], final_r / params['mbs'])\n\n # Update the networks\n if e % params['episode_per_update'] == 0: # TODO or size of batchdata..\n agent.update()\n agent.clear_batchdata() # reset the sampled policy trajectories\n agent.epsilon *= 0.995\n # # Save actor critic checkpoints every so often\n # if e % MODEL_SAVE_FREQ == 0 and e > 0:\n # agent.save_model()\n\n # perform a test of the policy where there is no exploration\n if e % params['episodes_test'] == params['episodes_test'] - 1:\n\n frq_img = np.zeros((frqs.shape[-2], frqs.shape[-1], 3))\n frq_img[:, :, 0] = (frqs[0] / np.sum(frqs))*100.\n frq_img[goals[0][0], goals[0][1], 1] = 1\n frq_img[:, :, 2] = batch_mazes[0] / 2.\n agent.writer.add_image(\"Frequency\", frq_img, int(epoch * (params['episodes']/params['episodes_test']) + e / params['episodes_test']), dataformats='HWC')\n frq_img[:, :, 0] = (frqs[0] > 0) * 1.0\n agent.writer.add_image(\"Visited\", frq_img, int(epoch * (params['episodes'] / params['episodes_test']) + e / params['episodes_test']), dataformats='HWC')\n frqs *= 0\n\n tot_reward = 0\n\n for i in range(params['mbs']):\n\n sim[i].reset()\n st = sim[i].get_state()\n mdp_reward = 0\n\n for t in range(batch_path_lengths[i]):\n a, _ = agent.get_action(st, test=True)\n r, done = sim[i].step(a)\n\n st1 = sim[i].get_state()\n\n mdp_reward += r\n\n if done:\n break\n\n st = st1\n\n mdp_reward = sim[i].normalize_reward(mdp_reward, params['path_length'])\n tot_reward += mdp_reward\n\n agent.writer.add_scalar(\"Test reward\", tot_reward/params['mbs'], int(epoch * (params['episodes']/params['episodes_test']) + e / params['episodes_test']))\n\n ''' TEST ON OLD MAZES '''\n\n # Once trained in a new maze, test the performances in the previous mazes.\n if epoch > 0:\n tot_reward = 0\n for x, temp_maze in enumerate(mazes[:epoch*params['mbs']]):\n\n sim = Simulator(starts[x], goals[x], temp_maze, params)\n T = int(paths_length[x] * params['horizon_multiplier'])\n\n # sim.reset()\n st = sim.get_state()\n tmp_reward = 0\n\n for t in range(paths_length[x]):\n\n a, _ = agent.get_action(st, test=True)\n r, done = sim.step(a)\n\n st1 = sim.get_state()\n tmp_reward += r\n\n if done:\n break\n\n st = st1\n\n tmp_reward = sim.normalize_reward(tmp_reward, params['path_length'])\n tot_reward += tmp_reward\n\n print('maze: ' + str(x) + ' reward: ' + str(tmp_reward), end=' ')\n print()\n\n 
agent.writer.add_scalar(\"Previous mazes average reward\", tot_reward / (epoch*params['mbs']), int(epoch))\n\n ''' TEST IN RANDOM STARTING POINT AND MAZE'''\n\n rnd_start, rnd_goal, rnd_maze, rnd_paths_length, _ = maze_gen.get_maze(central=False)\n rnd_sim = Simulator(rnd_start, rnd_goal, rnd_maze, params)\n T = rnd_paths_length * params['horizon_multiplier']\n\n tot_reward = 0\n\n rnd_sim.reset()\n st = rnd_sim.get_state()\n\n for t in range(rnd_paths_length):\n a, _ = agent.get_action(st, test=True)\n r, done = rnd_sim.step(a)\n\n st1 = rnd_sim.get_state()\n\n tot_reward += r\n\n if done:\n break\n\n st = st1\n\n agent.writer.add_scalar(\"Test reward on random starting point\", rnd_sim.normalize_reward(tot_reward, params['path_length']), int(epoch))\n\n print()\n", "sub_path": "runners/pg.py", "file_name": "pg.py", "file_ext": "py", "file_size_in_byte": 8443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "maze_gen.Maze_Gen", "line_number": 12, "usage_type": "call"}, {"api_name": "agents.pg_agent.PPO", "line_number": 15, "usage_type": "call"}, {"api_name": "agents.pg_agent.REINFORCE", "line_number": 17, "usage_type": "call"}, {"api_name": "maze_gen.get_maze", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 30, "usage_type": "call"}, {"api_name": "env.Simulator", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 134, "usage_type": "call"}, {"api_name": "env.Simulator", "line_number": 175, "usage_type": "call"}, {"api_name": "maze_gen.get_maze", "line_number": 205, "usage_type": "call"}, {"api_name": "env.Simulator", "line_number": 206, "usage_type": "call"}]} +{"seq_id": "331636964", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 11 11:52:18 2018\n\n@author: heshamelshafei\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 3 11:37:22 2018\n\n@author: heshamelshafei\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\n\"\"\"\n\n# !pip install tensorpac\n\nimport numpy as np\nimport scipy\nimport joblib\nimport h5py\nimport tensorpac\nimport matplotlib.pyplot as plt\nimport scipy.io as sio\nimport pickle\n\nfrom tensorpac import Pac\n\n#suj_list = np.concatenate((np.arange(1,22), np.arange(8,18)), axis=0)\n#gavg_pac = np.empty([14,13,9,2],dtype=float) #suj * amp Hz * pha Hz\n#gavg_pval = gavg_pac\n\n\nsuj_list = range(1,22) # !! 
\nlist_cue = np.array(['CnD','RCnD','LCnD','NCnD'])\n\ntime1_list = np.array([-0.6,0.6])\ntime1_wind = np.array([0.4,0.4])\n\nix_suj = -1;\n\nfor s in suj_list:\n\n ix_suj = ix_suj + 1\n ix_tst = 0\n\n for ncue in range(0,len(list_cue)):\n for nti in range(0,len(time1_list)):\n \n tbeg = time1_list[nti]\n tend = tbeg + time1_wind[nti]\n \n if tbeg<0:\n nbeg = 'm'+np.str(np.round(np.abs(tbeg)*1000))\n nbeg = nbeg.rstrip('0').rstrip('.')\n else:\n nbeg = 'p'+np.str(np.round(np.abs(tbeg)*1000))\n nbeg = nbeg.rstrip('0').rstrip('.')\n \n if tend<0:\n nend = 'm'+np.str(np.round(np.abs(tend)*1000))\n nend = nend.rstrip('0').rstrip('.')\n else:\n nend = 'p'+np.str(np.round(np.abs(tend)*1000))\n nend = nend.rstrip('0').rstrip('.') \n \n period_name = nbeg+nend\n \n list_chan = np.array(['broad_aud_R','5Neig_aud_R'])\n \n for xi_chan in range(0,len(list_chan)):\n \n suj = \"yc\" + str(s)\n ext_dir = '/Users/heshamelshafei/GoogleDrive/PhD/Fieldtripping/data/yc_all_data/'\n mat_name = ext_dir+suj+'.'+list_cue[ncue]+'.AllYc4Roisexplor.1t20Hz.m800p2000msCov.' + list_chan[xi_chan] +'.MinEvoked.mat'\n mat_content = h5py.File(mat_name,'r')\n \n print('Importing Phase Data for '+suj)\n \n ntrial = len(mat_content['virtsens/trial'])\n trialen = len(mat_content[mat_content['virtsens/trial'][0][0]])\n nchan = len(mat_content[mat_content['virtsens/trial'][0][0]][0])\n time_axis = np.array(mat_content[mat_content['virtsens/time'][0][0]])\n time_axis = np.round(time_axis,3)\n data = np.empty([ntrial, nchan,trialen],dtype=float)\n \n for i in range(0,len(mat_content['virtsens/trial'])):\n \n data[:,:,:] = np.transpose(np.array(mat_content[mat_content['virtsens/trial'][i][0]]))\n \n t1 = float('%.3f'%(time1_list[nti]))\n t2 = float('%.3f'%(np.round(time1_list[nti]+time1_wind[nti],1)))\n \n lm1 = np.int(np.where(time_axis==t1)[0])\n lm2 = np.int(np.where(time_axis==t2)[0])\n \n data_pha = data[:,:,range(lm1,lm2)]\n \n #del data\n \n mat_name = ext_dir+suj+'.'+list_cue[ncue]+'.AllYc4Roisexplor.50t120Hz.m800p2000msCov.' 
+ list_chan[xi_chan] +'.MinEvoked.mat'\n mat_content = h5py.File(mat_name,'r')\n \n print('Importing Amplitude Data for '+suj)\n \n ntrial = len(mat_content['virtsens/trial'])\n trialen = len(mat_content[mat_content['virtsens/trial'][0][0]])\n nchan = len(mat_content[mat_content['virtsens/trial'][0][0]][0])\n time_axis = np.array(mat_content[mat_content['virtsens/time'][0][0]])\n time_axis = np.round(time_axis,3)\n data = np.empty([ntrial, nchan,trialen],dtype=float)\n \n for i in range(0,len(mat_content['virtsens/trial'])):\n \n data[:,:,:] = np.transpose(np.array(mat_content[mat_content['virtsens/trial'][i][0]]))\n \n data_amp = data[:,:,range(lm1,lm2)]\n \n #del data\n \n for n_method in range(1,3):\n for n_surr in range(1,2):\n for n_norm in range(0,1): #range(0,5):\n \n vec_pha = np.arange(7, 20, 1)[:-1]\n vec_amp = np.arange(50, 120, 2)[:-1]\n \n p = Pac(idpac=(n_method, n_surr, n_norm), fpha=(7, 20, 1, 1), famp=(50, 120, 2, 2),\n dcomplex='wavelet',width=7) # start, stop, width, step\n \n ix_tst = ix_tst+ 1\n \n #print('Calculating PAC for '+suj+' Test '+ str(ix_tst) + ' out of 8')\n \n sf = 600\n n_perm = 200\n \n if n_norm==0 or n_method==4:\n xpac = p.filterfit(sf,data_pha, xamp=data_amp,axis=2, nperm=n_perm, get_pval=True)\n pval = 0\n py_pac = {'xpac': xpac, 'pval': pval,'vec_pha':vec_pha,'vec_amp':vec_amp}\n else:\n xpac, pval = p.filterfit(sf,data_pha, xamp=data_amp,axis=2, nperm=n_perm, get_pval=True)\n py_pac = {'xpac': xpac, 'pval': pval,'vec_pha':vec_pha,'vec_amp':vec_amp}\n \n list_method = np.array(['MVL','KLD','HR','ndPAC','PhaSyn']) #The ndPAC uses a p-value computed as 1/nperm.\n list_surr = np.array(['SwPhAmp','SwAmp','ShuAmp','TLag'])\n list_norm = np.array(['NoNorm','SubMean','DivMean','SubDivMean','Zscore'])\n \n fname_out = ext_dir + suj + '.' + list_cue[ncue] + '.' + period_name + '.' + list_chan[xi_chan]\n fname_out = fname_out + '.' + list_method[n_method-1] + '.' +list_surr[n_surr-1] + '.' + list_norm[n_norm] + '.tensorpac200perm.mat'\n \n print('Saving '+fname_out)\n \n sio.savemat(fname_out, {'py_pac':py_pac})\n \n fname_out = ext_dir + suj + '.' + list_cue[ncue] + '.' + period_name\n fname_out = fname_out + '.' + list_method[n_method-1] + '.' +list_surr[n_surr-1] + '.' 
+ list_norm[n_norm] + '.tensorpac200perm.pckl'\n \n #f = open('store.pckl', 'wb')\n #pickle.dump(py_pac, f)\n #f.close()\n \n del pval \n del xpac\n \n #xpac[pval>0.05] = 0\n #gavg_pac[ix_suj,:,:,:] = np.mean(xpac,axis=2)\n #gavg_pval[ix_suj,:,:,:] = np.mean(pval,axis=2)\n #print('Done')\n \n #new_pac = np.mean(gavg_pac,axis=0)\n #p.comodulogram(new_pac[:,:,1], title='Right AcX',cmap='gnuplot',vmin=0, vmax=0.5,plotas='contour', ncontours=10)\n #p.comodulogram(new_pac[:,:,0], title='Left AcX',cmap='gnuplot',vmin=0, vmax=0.5,plotas='contour', ncontours=10)\n \n #fname_out = '/Users/heshamelshafei/Google Drive/PhD/Fieldtripping/data/frompython.mat'\n #sio.savemat(fname_out, {'new_pac':new_pac})\n \n #plt.subplot(2,1,1)\n #plt.subplot(2,1,2)\n #p.comodulogram(new_pac[:,:,1], title='Right AcX',vmin=0, vmax=2,cmap='Spectral_r', plotas='contour', ncontours=5)", "sub_path": "python/pat_pat22_TensorPac.py", "file_name": "pat_pat22_TensorPac.py", "file_ext": "py", "file_size_in_byte": 8077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.str", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.str", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.str", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.str", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 102, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 133, "usage_type": "call"}, 
{"api_name": "tensorpac.Pac", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 155, "usage_type": "call"}, {"api_name": "scipy.io.savemat", "line_number": 162, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 162, "usage_type": "name"}]} +{"seq_id": "569198596", "text": "import numpy as np\r\nimport cv2 as cv\r\n\r\ndef empty(x):\r\n    pass\r\n\r\ncapture=cv.VideoCapture(0)\r\n\r\nc=1\r\n\r\npoint=[]\r\ncv.namedWindow(\"trackbar\")\r\ncv.resizeWindow(\"trackbar\",640,240)\r\ndraw=np.zeros((600,1200,3),dtype=\"uint8\")+255\r\n\r\ncv.createTrackbar(\"Hue Min\",\"trackbar\",92,179,empty)\r\ncv.createTrackbar(\"Sat Min\",\"trackbar\",112,255,empty)\r\ncv.createTrackbar(\"Val Min\",\"trackbar\",163,255,empty)\r\n\r\ncv.createTrackbar(\"Hue Max\",\"trackbar\",179,179,empty)\r\ncv.createTrackbar(\"Sat Max\",\"trackbar\",255,255,empty)\r\ncv.createTrackbar(\"Val Max\",\"trackbar\",255,255,empty)\r\nwhile True:\r\n    get,face=capture.read()\r\n    face=cv.flip(face,1)\r\n    \r\n    image=cv.resize(face,(1200,600))\r\n    imgHSV=cv.cvtColor(image,cv.COLOR_BGR2HSV)\r\n    \r\n    hue_min=cv.getTrackbarPos(\"Hue Min\",\"trackbar\")\r\n    sat_min=cv.getTrackbarPos(\"Sat Min\",\"trackbar\")\r\n    val_min=cv.getTrackbarPos(\"Val Min\",\"trackbar\")\r\n    hue_max=cv.getTrackbarPos(\"Hue Max\",\"trackbar\")\r\n    sat_max=cv.getTrackbarPos(\"Sat Max\",\"trackbar\")\r\n    val_max=cv.getTrackbarPos(\"Val Max\",\"trackbar\")\r\n    \r\n\r\n    lower=np.array([hue_min,sat_min,val_min])\r\n    upper=np.array([hue_max,sat_max,val_max])\r\n    \r\n    mask=cv.inRange(imgHSV,lower,upper)\r\n    mask=cv.erode(mask,(3,3),iterations=1)\r\n    mask=cv.morphologyEx(mask,cv.MORPH_OPEN,(3,3))\r\n    mask=cv.dilate(mask,(3,3),iterations=1)\r\n    img_res=cv.bitwise_and(image,image,mask=mask)\r\n    center=None\r\n    cnts,_=cv.findContours(mask.copy(),cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)\r\n    \r\n    if len(cnts)>0:\r\n        cnt=sorted(cnts,key=cv.contourArea,reverse=True)[0]\r\n        ((x,y),radius)=cv.minEnclosingCircle(cnt)\r\n        cv.circle(image,(int(x),int(y)),int(radius),(0,255,255),2)\r\n        \r\n        center=[int(x),int(y)]\r\n        \r\n        \r\n        point.append(center)\r\n        if c==0:\r\n            point.append(center)\r\n\r\n        \r\n        if cv.waitKey(2) & 0xFF==ord(\"w\"):\r\n            c=0\r\n            while True:\r\n                print(\"not\")\r\n                if cv.waitKey(2) & 0xFF==ord(\"w\"):\r\n                    break\r\n            \r\n            \r\n        else:\r\n            if radius>10 and len(point)>2:\r\n                cv.line(draw,tuple(point[-1]),tuple(point[-2]),(0,255,0),7) \r\n            c=1\r\n    \r\n    \r\n    if cv.waitKey(1) & 0xFF==ord(\"c\"): \r\n        point=[]\r\n        draw=np.zeros((600,1200,3),dtype=\"uint8\")+255\r\n        \r\n    cv.imshow(\"image\",image)\r\n    cv.imshow(\"draw\",draw)\r\n    cv.imshow(\"original image\",imgHSV)\r\n    cv.imshow(\"masked image\",mask)\r\n    \r\n    if cv.waitKey(1) & 0xFF==ord(\"q\"):\r\n        break\r\n\r\n    \r\n\r\n\r\ncv.destroyAllWindows()\r\n", "sub_path": "air_canvas.py", "file_name": "air_canvas.py", "file_ext": "py", "file_size_in_byte": 2588, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "cv2.VideoCapture", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.resizeWindow", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 16, "usage_type": "call"}, {"api_name": 
"cv2.createTrackbar", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.getTrackbarPos", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.erode", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.morphologyEx", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 50, "usage_type": "attribute"}, {"api_name": "cv2.minEnclosingCircle", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "163001055", "text": "import requests\nimport json\n\nURL = 'http://0.0.0.0:1314'\nURL = 'https://7f667240.ngrok.io'\n\nclass Recommendation:\n\tdef recommend(self, userID, message):\n\t\tres = requests.post(URL, json={'userID' : userID, 'message' : message})\n\n\t\tans = json.loads(res.text)\n\t\treturn ans\n\nif __name__ == '__main__':\n\tserv = Recommendation()\n\tprint(serv.recommend('tuyenhuy09121998', 'thethao'))", "sub_path": "recommend.py", "file_name": "recommend.py", "file_ext": "py", "file_size_in_byte": 377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "55", "api": [{"api_name": "requests.post", "line_number": 9, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "143949525", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom flask import Flask,render_template,request,redirect \n#在flask中css.图片,js等静态文件都放在static文件夹中\nfrom sql import db,cursor\napp=Flask(__name__)\nid=0\n@app.route('/',methods=[\"GET\"]) #访问根目录执行的操作\ndef hello_world():\n cursor.execute(\"select * from student\")\n results = cursor.fetchall()\n return render_template('index.html',results=results) #渲染模板,默认在templates下\n@app.route('/add',methods=[\"GET\"])\ndef add():\n return render_template('add.html')\n@app.route('/data',methods=[\"GET\"])\ndef data():\n name=request.args.get('name') # post的数据通过request.form获取\n age=request.args.get('age')\n sex=request.args.get('sex')\n #游标\n if type(id) == type(\"\"):\n cursor.execute(\"update student set name=%s,age=%s,sex=%s where id=%s\",(name,age,sex,id))\n else:\n sql=\"insert into student(name,age,sex) values ('\"+name+\"','\"+age+\"','\"+sex+\"')\"\n cursor.execute(sql)\n db.commit() \n return render_template(\"tishi.html\")\n@app.route('/delect',methods=[\"GET\"])\ndef delect():\n id=request.args.get('id')\n cursor.execute(\"delete from student where id=\"+id)\n db.commit() \n return render_template(\"tishi.html\")\n@app.route('/updata',methods=[\"GET\"])\ndef update():\n global id\n id=request.args.get('id')\n cursor.execute(\"select * from student where id=\"+id)\n db.commit()\n up1=cursor.fetchall() #fetchall全部 ,fetchone 一条数据\n return render_template(\"updata.html\",up1=up1)\napp.run(host=\"0.0.0.0\",port=6660)", "sub_path": "可编辑表格-客户端/demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 1594, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "sql.cursor.execute", "line_number": 10, "usage_type": "call"}, {"api_name": "sql.cursor", "line_number": 10, "usage_type": "name"}, {"api_name": "sql.cursor.fetchall", "line_number": 11, "usage_type": "call"}, {"api_name": "sql.cursor", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "sql.cursor.execute", "line_number": 23, "usage_type": "call"}, {"api_name": "sql.cursor", "line_number": 23, "usage_type": "name"}, {"api_name": "sql.cursor.execute", "line_number": 26, "usage_type": "call"}, {"api_name": "sql.cursor", "line_number": 26, "usage_type": "name"}, {"api_name": "sql.db.commit", "line_number": 27, "usage_type": "call"}, {"api_name": "sql.db", "line_number": 27, "usage_type": 
"name"}, {"api_name": "flask.render_template", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "sql.cursor.execute", "line_number": 32, "usage_type": "call"}, {"api_name": "sql.cursor", "line_number": 32, "usage_type": "name"}, {"api_name": "sql.db.commit", "line_number": 33, "usage_type": "call"}, {"api_name": "sql.db", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "sql.cursor.execute", "line_number": 39, "usage_type": "call"}, {"api_name": "sql.cursor", "line_number": 39, "usage_type": "name"}, {"api_name": "sql.db.commit", "line_number": 40, "usage_type": "call"}, {"api_name": "sql.db", "line_number": 40, "usage_type": "name"}, {"api_name": "sql.cursor.fetchall", "line_number": 41, "usage_type": "call"}, {"api_name": "sql.cursor", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "109974740", "text": "import numpy as np\n\nclass LinearValueProcessor:\n def __init__(self, start_eps, end_eps, end_eps_frames):\n self.start_eps = start_eps\n self.end_eps = end_eps\n self.end_eps_frames = end_eps_frames\n \n def __call__(self, frame):\n if frame >= self.end_eps_frames:\n return self.end_eps\n df = frame / self.end_eps_frames\n return df * self.end_eps + (1.0 - df) * self.start_eps\n\nclass DefaultRewardsShaper:\n def __init__(self, scale_value = 1, shift_value = 0, min_val=-np.inf, max_val=np.inf, is_torch=True):\n self.scale_value = scale_value\n self.shift_value = shift_value\n self.min_val = min_val\n self.max_val = max_val\n self.is_torch = is_torch\n\n def __call__(self, reward):\n \n reward = reward + self.shift_value\n reward = reward * self.scale_value\n \n if self.is_torch:\n import torch\n reward = torch.clamp(reward, self.min_val, self.max_val)\n else:\n reward = np.clip(reward, self.min_val, self.max_val)\n return reward\n\n\ndef flatten_first_two_dims(arr):\n if arr.ndim > 2:\n return arr.reshape(-1, *arr.shape[-(arr.ndim-2):])\n else:\n return arr.reshape(-1)\n\ndef get_or_default(config, name, def_val):\n if name in config:\n return config[name]\n else:\n return def_val\n\ndef free_mem():\n import ctypes\n ctypes.CDLL('libc.so.6').malloc_trim(0) ", "sub_path": "rl_games/common/tr_helpers.py", "file_name": "tr_helpers.py", "file_ext": "py", "file_size_in_byte": 1454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.inf", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.clamp", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 32, "usage_type": "call"}, {"api_name": "ctypes.CDLL", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "654396471", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom setuptools import setup, find_packages\nfrom glob import glob\nfrom os.path import basename\nfrom os.path import dirname\nfrom os.path import join\nfrom os.path import splitext\n\nversion = '1.0'\n\n\ndef 
get_long_desc():\n with open('README.md') as f:\n return f.read()\n\n\ninstall_requires = [\n 'scipy',\n]\n\ntest_requires = [\n 'pytest',\n]\n\nsetup(\n name='pyelan',\n version=version,\n description=\"Python library for ELAN\",\n long_description=\"Python library for ELAN\",\n url='https://github.com/jonkeane/pyelan/',\n\n classifiers=[\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n author='Jonathan Keane',\n author_email='jkeane@gmail.com',\n license='GPL3 License',\n install_requires=install_requires,\n tests_require=test_requires,\n extras_require={\n 'testing': test_requires,\n },\n packages=find_packages(exclude=['tests']),\n # package_dir={'': 'pyelan'},\n include_package_data=True,\n entry_points={},\n py_modules=[splitext(basename(path))[0] for path in glob('pyelan/*.py')],\n # namespace_packages=['pyelan'],\n zip_safe=True,\n)", "sub_path": "pyelan/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1208, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "setuptools.setup", "line_number": 27, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 50, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "120407671", "text": "from setuptools import setup\nfrom setuptools.command.test import test as TestCommand\nimport sys\n\n\npython_version = sys.version_info\n__version__ = \"1.1.01\"\n\nNUMPY_VERSION = 'numpy >= 1.9.2'\n\n\nclass PyTest(TestCommand, object):\n\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n super(PyTest, self).initialize_options()\n self.pytest_args = []\n\n def finalize_options(self):\n super(PyTest, self).finalize_options()\n self.test_suite = True\n self.test_args = []\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n exit(pytest.main(self.pytest_args))\n\n# readme = open('README.rst').read()\n\n# doclink = \"\"\"\n# Documentation\n# -------------\n#\n# The full documentation is at http://geoscienceaustralia.github.io/passive\n# -seismic\n# /.\"\"\"\n# history = open('HISTORY.rst').read().replace('.. 
:changelog:', '')\n\nsetup(\n name='PhasePApy',\n version=__version__,\n # description='Repository for development of software and '\n # 'metadata for passive seismic project',\n # long_description=readme + '\\n\\n' + doclink + '\\n\\n' + history,\n # author='Geoscience Australia Passive Seismic Team',\n author_email='',\n url='https://github.com/GeoscienceAustralia/PhasePApy',\n packages=['phasepapy', 'phasepapy.associator', 'phasepapy.phasepicker'],\n dependency_links=['https://github.com/matplotlib/basemap/archive/v1.1.0'\n '.zip#egg=basemap-1.1.0'],\n package_dir={'PhasePApy': 'phasepapy'},\n include_package_data=True,\n # numpy preinstall required due to obspy\n setup_requires=[\n NUMPY_VERSION, 'Cython >= 0.22.1',\n ],\n install_requires=[\n NUMPY_VERSION,\n 'scipy >= 0.15.1',\n 'matplotlib >= 2.0.0', # need to install inside virtualenv for basemap\n 'obspy >= 1.1.0',\n 'pillow >= 4.1.1',\n 'basemap == 1.1.0',\n 'mpi4py == 2.0.0',\n 'geographiclib',\n ],\n extras_require={\n 'dev': [\n 'sphinx',\n 'ghp-import',\n 'sphinxcontrib-programoutput',\n 'tox',\n 'pytest-flake8 >= 0.8.1',\n 'pytest-mock >= 1.6.0',\n 'pytest-cov >= 2.5.1',\n 'pytest-regtest >= 0.15.1',\n 'flake8-docstrings >= 1.1.0',\n 'coverage',\n 'codecov',\n 'coveralls >= 1.1',\n 'pytest >= 3.2'\n ]\n },\n\n license=\"See Readme\",\n zip_safe=False,\n keywords='Seismic, associator, picker, PhasePicker, '\n 'FBpicker, AICDpicker, KTpicker, P and S phases',\n classifiers=[\n 'Development Status :: 4 - Beta',\n \"Operating System :: POSIX\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n # \"Programming Language :: Python :: 3.7\",\n # add additional supported python versions\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Scientific/Engineering :: Information Analysis\"\n # add more topics\n ],\n cmdclass={\n 'test': PyTest,\n }\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 3605, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.version_info", "line_number": 6, "usage_type": "attribute"}, {"api_name": "setuptools.command.test.test", "line_number": 12, "usage_type": "name"}, {"api_name": "pytest.main", "line_number": 28, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "654460867", "text": "from PIL import Image\nimport numpy as np\n# import time\nimport os\ntraindir = \"train/\"\ntestdir = \"test/\"\nsavedir = \"result/\"\nheight = 10\nwidth = 10\ncolor = 3\n\n\ndef removedirs(top):\n for root, dirs, files in os.walk(top, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n\n\ndef saveimage(Im, base, i):\n file = base + str(i) + \".bmp\"\n if not os.path.exists(file):\n open(file, \"w+\").close()\n Im.save(file)\n\n\ndef getimage(base, name):\n Im = Image.open(base + name)\n # print(data)\n return Im\n\n\ndef imagetodata(base, name):\n # print(\"read file\", name)\n Im = Image.open(base + name)\n # 
print(Im.mode, Im.size, Im.format)\n # Im.thumbnail([10, 10], Image.ANTIALIAS)\n Im = Im.convert(\"RGB\").resize((height, width))\n # Im = Im.resize(size)\n # print(\"size\", Im.size)\n data = (np.asarray(Im.getdata(), dtype=float) + 1) / 257\n # print(data)\n data.resize((height * width * color))\n # print(data)\n return data\n\n\ndef datatoimage(data):\n data = data * 257 - 1\n data.resize((height, width, color))\n Im = Image.fromarray(data.astype(np.uint8))\n return Im\n\n\n# opt = 'vertical' or 'horizontal': join the images vertically or horizontally\ndef imagejoint(image_list, opt='horizontal'):\n image_num = len(image_list)\n image_size = image_list[0].size\n height = image_size[1]\n width = image_size[0]\n\n if opt == 'vertical':\n new_img = Image.new('RGB', (width, image_num * height), 255)\n else:\n new_img = Image.new('RGB', (image_num * width, height), 255)\n x = y = 0\n count = 0\n for img in image_list:\n new_img.paste(img, (x, y))\n count += 1\n if opt == 'horizontal':\n x += width\n else:\n y += height\n return new_img\n\n\n# data = imagetodata(traindir, \"c94ca86b-c5ad-4d46-94b2-864f90182ff0.jpg\")\n# Im = datatoimage(data)\n# Im.show()\n", "sub_path": "imageutil.py", "file_name": "imageutil.py", "file_ext": "py", "file_size_in_byte": 1985, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.walk", "line_number": 14, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.rmdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 29, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 29, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 42, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 52, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 52, "usage_type": "attribute"}, {"api_name": "PIL.Image.new", "line_number": 64, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 64, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "609760009", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nimport numpy as np\nimport scipy.special as comb\nimport math\nfrom operator import mul\nimport neal\nimport dimod\nimport random\nimport matplotlib.pyplot as plt\nimport timeit\nimport time\nfrom itertools import combinations\n\n\ndef calc_marginals(df):\n\treturn np.array(\n [sum(df['Y']),\n np.dot(df['Y'], df['SEX']),\n np.dot(df['Y'], df['AOP'])\n\t\t])\n\n\ndef make_Hamiltonian(df):\n t_list = calc_marginals(df)\n \n N=len(df)\n dup_list = [(i, i) for i in range(N)]\n comb_list = [(i, j) for i in range(N) for j in range(i+1, N)]\n \n lin_Y = [1-2*t_list[0] for (i, _) in dup_list] # same y with itself\n quad_Y = [2 for 
(i, j) in comb_list] # pairs of different y's\n num_Y = t_list[0]**2 # square of the target number\n \n SEX = df['SEX'].iloc\n lin_SEX = [(SEX[i] - 2 * t_list[1]) * SEX[i] for (i, _) in dup_list]\n quad_SEX = [2*SEX[i] * SEX[j] for (i, j) in comb_list]\n num_SEX = t_list[1]**2\n \n AOP = df['AOP'].iloc\n lin_AOP = [(AOP[i] - 2 * t_list[2]) * AOP[i] for (i, _) in dup_list]\n quad_AOP = [2*AOP[i] * AOP[j] for (i, j) in comb_list]\n num_AOP = t_list[2]**2\n \n lin_list = [sum(lin) for lin in zip(lin_Y, lin_SEX, lin_AOP)]\n lin = {i: lin_list[i] for (i, _) in dup_list}\n \n quad_values = [sum(quad) for quad in zip(quad_Y, quad_SEX, quad_AOP)]\n quad = {ij: quad_values[n] for (n, ij) in enumerate(comb_list)}\n \n num = num_Y + num_SEX + num_AOP\n return dimod.BinaryQuadraticModel(lin, quad, num, dimod.Vartype.BINARY)#dic, dic, num\n\n\ndef make_res_data(df, num_reads):\n sa_sampler = neal.sampler.SimulatedAnnealingSampler()\n initial_states = df['Y'].values.tolist()\n bqm = make_Hamiltonian(df)\n time_0 = time.time() ##\n res = sa_sampler.sample(\n bqm, num_reads = num_reads,\n initial_states = initial_states,\n initial_states_generator = 'tile'\n )\n time_1 = time.time() ##\n elapsed_time = time_1 - time_0\n return res, elapsed_time\n\n\ndef find_valid_y(res):\n valid_y_info_dic = {}#sample:occurrence\n for sample, energy, num_occurrences in list(res.data(['sample', 'energy', 'num_occurrences'])):\n if energy==0.:\n this_time_y = tuple(sample.values())\n if this_time_y in list(valid_y_info_dic.keys()):#\n valid_y_info_dic[this_time_y] += num_occurrences#\n else:\n valid_y_info_dic[this_time_y] = num_occurrences#\n return valid_y_info_dic\n\n\ndef num_y_transition_nodup(df, num_reads, path):\n time_0 = time.time()\n sa_sampler = neal.sampler.SimulatedAnnealingSampler()\n bqm = make_Hamiltonian(df)\n res = sa_sampler.sample(\n bqm,\n num_reads = num_reads,\n initial_states = df['Y'].values.tolist(),\n initial_states_generator = 'tile'\n )\n \n valid_y_info_dic_nodup = {}\n time_list = []\n for sample, energy, num_occurrences in list(res.data(['sample', 'energy', 'num_occurrences'])):\n if energy==0.:\n sample_tu = tuple(sample.values())\n if sample_tu in list(valid_y_info_dic_nodup.keys()):\n continue\n else:\n valid_y_info_dic_nodup[sample_tu] = 1\n time_1 = time.time()\n elapsed_time = time_1 - time_0\n time_list.append(elapsed_time)\n valid_y_num_list = [i for i in range(1, len(valid_y_info_dic_nodup)+1)]\n plt.xlabel('time')\n plt.ylabel('number of valid y')\n plt.plot(time_list, valid_y_num_list)\n plt.savefig(path)\n plt.show()\n plt.close()\n return time_list\n\n\nclass SA_res_valid_dic():\n def __init__(self, df, res, valid_y_info_dic, num_reads):\n self.df = df\n self.res = res\n self.valid_y_info_dic = valid_y_info_dic\n self.num_reads = num_reads\n \n def p_value_transition(self, output_path):\n valid_y_list = []\n t1 = int(np.dot(self.df['Y'], self.df['LI']))\n t1_y = 0\n p_dic = {}\n \n for sample, energy, num_occurrences in list(self.res.data(['sample', 'energy', 'num_occurrences'])):\n if energy==0.:\n this_time_y = tuple(sample.values())\n if this_time_y in valid_y_list:\n continue\n else:\n valid_y_list.append(this_time_y)#\n this_time_y_se = pd.Series(this_time_y)\n if int(np.dot(this_time_y_se, self.df['LI'])) == t1:\n t1_y += 1\n p_dic[len(valid_y_list)] = t1_y / len(valid_y_list)\n plt.xlabel('number of valid y')\n plt.ylabel('p value')\n plt.plot(list(p_dic.keys()), list(p_dic.values()))\n plt.savefig(output_path)\n plt.show()\n plt.close()\n return valid_y_list, p_dic\n \n def occurrence_hist(self, plot_path):\n 
occurrence_list = list(self.valid_y_info_dic.values())\n x = [i for i in range(len(occurrence_list))]\n plt.xlabel('each sample')\n plt.ylabel('number of the occurrence')\n plt.bar(x, occurrence_list)\n ax = plt.gca()\n ax.axes.xaxis.set_visible(False)\n plt.savefig(plot_path)\n plt.show()\n plt.close()\n return occurrence_list\n \n def y_num_hist(self, path):\n t_dic = {}\n valid_y_list = []\n for valid_y in list(self.valid_y_info_dic.keys()):\n if valid_y in valid_y_list:\n continue\n else:\n valid_y_se = pd.Series(valid_y)\n this_time_t1 = int(np.dot(valid_y_se, self.df['LI']))\n if this_time_t1 in list(t_dic.keys()):\n t_dic[this_time_t1] += 1\n else:\n t_dic[this_time_t1] = 1\n x = [i for i in list(t_dic.keys())]\n y = [i for i in list(t_dic.values())]\n plt.xlabel('value of t1')\n plt.ylabel('number of sample')\n plt.bar(x, y)\n plt.xticks(x, x)\n plt.yticks(y, y)\n plt.savefig(path)\n plt.show()\n plt.close()\n return t_dic\n \n def calc_p_value(self):\n t1 = int(np.dot(self.df['Y'], self.df['LI']))\n num_t1_y = 0\n for valid_y in list(self.valid_y_info_dic.keys()):\n valid_y_se = pd.Series(valid_y)\n if int(np.dot(valid_y_se, self.df['LI'])) <= t1:####################\n num_t1_y += 1\n return num_t1_y/len(self.valid_y_info_dic)\n \n def calc_p_value_noeq(self):\n t1 = int(np.dot(self.df['Y'], self.df['LI']))\n num_t1_y = 0\n for valid_y in list(self.valid_y_info_dic.keys()):\n valid_y_se = pd.Series(valid_y)\n if int(np.dot(valid_y_se, self.df['LI'])) < t1:####################\n num_t1_y += 1\n return num_t1_y/len(self.valid_y_info_dic)", "sub_path": "202012/SA/SA_exact_test_functions.py", "file_name": "SA_exact_test_functions.py", "file_ext": "py", "file_size_in_byte": 6833, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 22, "usage_type": "call"}, {"api_name": "dimod.BinaryQuadraticModel", "line_number": 54, "usage_type": "call"}, {"api_name": "dimod.Vartype", "line_number": 54, "usage_type": "attribute"}, {"api_name": "neal.sampler.SimulatedAnnealingSampler", "line_number": 58, "usage_type": "call"}, {"api_name": "neal.sampler", "line_number": 58, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 67, "usage_type": "call"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "neal.sampler.SimulatedAnnealingSampler", "line_number": 86, "usage_type": "call"}, {"api_name": "neal.sampler", "line_number": 86, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 112, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 182, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 188, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 197, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "397737738", "text": "# -*- coding: utf-8 -*-\n\nimport numpy as np;\nimport matplotlib.pyplot as plt;\n\nimport tensorflow as tf;\nimport keras as ks;\n\nimport cv2;\n\nfrom keras.datasets import mnist;\nfrom keras.models import Sequential, load_model;\nfrom keras.layers.core import Dense, Dropout, Activation;\nfrom keras.utils import np_utils;\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data();\n\n\n\n# OpenCV 測試\n#cv2.imshow( '' , X_train[0,:] );\n\n\n# matplot 畫圖 , 顯示前九筆\nfig = plt.figure(\"0~9圖\");\nfor nn in range( 0 , 10 ):\n plt.subplot( 3 , 3 , nn + 1); # 生成 3 x 3 = 9 張圖\n plt.tight_layout(); # 表示紧凑显示图像(周圍空白較少)\n plt.imshow( X_train[nn] , cmap='binary' , interpolation='none' );\n# plt.imshow( X_train[nn] , cmap='summer' , interpolation='none' );\n plt.title('Class = {0}'.format( y_train[nn] ) );\n plt.colorbar(shrink=.92);\n plt.xticks(());\n plt.yticks(());\nfig.show(); \n\n\n# 顯示Hist長條圖\nindex_choose = 1;\nfig2 = plt.figure('長條圖分佈');\nplt.subplot(2,1,1);\nplt.imshow( X_train[index_choose] , cmap = 'binary' , interpolation = 'none' );\nplt.title('Class = {0}'.format( y_train[index_choose] ) );\nplt.colorbar(shrink=.92);\nplt.xticks(());\nplt.yticks(());\nplt.subplot(2,1,2);\nplt.grid(True);\nplt.hist( X_train[0].reshape(-1) );\nplt.title('Pixel Value Distribution');\nplt.show();\n\n\n# one-hot encoding : 將輸出資料轉換為 0 & 1 組成的陣列,\n# ex: 若是數字 5\n# 0 1 2 3 4 5 6 7 8 9\n# 0 0 0 0 0 1 0 0 0 0 --> index 5 是 1 , 其他為 0\nn_classes = 10;\nY_train = np_utils.to_categorical( y_train , n_classes );\nY_test = np_utils.to_categorical( y_test , n_classes );\nprint('ONE-HOT ENC 之後Shape %s' % str( y_train.shape ))\nprint('ONE-HOT ENC 之後Shape %s' % str( Y_train.shape ))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "ML_workspace/tensorflow_test.py", "file_name": "tensorflow_test.py", "file_ext": "py", "file_size_in_byte": 1793, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 29, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 59, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "465736293", "text": "from django.shortcuts import render\nfrom common import game\nfrom django.template.defaulttags import register\nfrom django.conf import settings\nfrom common import game\n\n@register.filter\ndef get_item(dictionary, key):\n return dictionary[key]\n\n# Create your views here.\ndef moviedex_f(request):\n d = game.data_game()\n d.load_state()\n\n selector = game.Selector()\n if len(d.data['moviedex']):\n\n if request.method == \"POST\":\n if 'left' in request.POST:\n selector.minus(d.data)\n elif 'right' in request.POST:\n selector.plus(d.data)\n 
else:\n selector.reset(d.data)\n detail_link = 'http://127.0.0.1:8000/moviedex/' + selector.slot_place + '/'\n else:\n selector.slot_place = \"\"\n detail_link = '#'\n return render(request, \"moviedex.html\",{\n 'movie_list': d.data['list_moviemon'],\n 'moviedex': d.data['moviedex'],\n 'selector_pos': selector.slot_place,\n 'detail_link': detail_link,\n 'len_moviedex': len(d.data['moviedex']),\n 'worldmap_link': 'http://127.0.0.1:8000/worldmap/' })\n", "sub_path": "rush00/moviedex/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1124, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.template.defaulttags.register.filter", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.template.defaulttags.register", "line_number": 7, "usage_type": "name"}, {"api_name": "common.game.data_game", "line_number": 13, "usage_type": "call"}, {"api_name": "common.game", "line_number": 13, "usage_type": "name"}, {"api_name": "common.game.Selector", "line_number": 16, "usage_type": "call"}, {"api_name": "common.game", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "518720619", "text": "import base64\nimport re\nimport tempfile\nfrom io import BytesIO\nimport importlib\nfrom pathlib import Path\nfrom time import gmtime\n\nimport arrow\nfrom docx.oxml import OxmlElement\nimport matplotlib\nfrom docx.text.paragraph import Paragraph\nfrom matplotlib import pyplot as plt\nimport matplotlib.font_manager as fm\n\nfrom sane_doc_reports.domain import CellObject, Section\nfrom sane_doc_reports.conf import SIZE_H_INCHES, SIZE_W_INCHES, \\\n DEFAULT_DPI, DEFAULT_LEGEND_FONT_SIZE, DEFAULT_WORD_FONT, \\\n DEFAULT_ALPHA, DEFAULT_FONT_COLOR, DEFAULT_WORD_FONT_FALLBACK, \\\n DEFAULT_FONT_AXIS_COLOR, LEGEND_STYLE\n\n\ndef open_b64_image(image_base64):\n \"\"\"\n Open a virtual image file from base64 format of image.\n \"\"\"\n prefix_regex = r'^data:.*?;base64,'\n raw_base64 = re.sub(prefix_regex, '', image_base64)\n f = BytesIO()\n f.write(base64.b64decode(raw_base64))\n f.seek(0)\n return f\n\n\ndef insert_by_type(type: str, cell_object: CellObject,\n section: Section):\n \"\"\" Call a elements elemnt's insert method \"\"\"\n try:\n func = importlib.import_module(f'sane_doc_reports.elements.{type}')\n func.invoke(cell_object, section)\n except ModuleNotFoundError:\n import sane_doc_reports.elements.unimplemented as unimplemented\n unimplemented.invoke(cell_object, section)\n\n\ndef _insert_paragraph_after(paragraph):\n \"\"\"Insert a new paragraph after the given paragraph.\"\"\"\n new_p = OxmlElement(\"w:p\")\n paragraph._p.addnext(new_p)\n new_para = Paragraph(new_p, paragraph._parent)\n\n return new_para\n\n\ndef add_run(cell_object):\n \"\"\" Insert a paragraph so we could add a new element\"\"\"\n cell_object.paragraph = _insert_paragraph_after(cell_object.paragraph)\n cell_object.run = cell_object.paragraph.add_run()\n return cell_object\n\n\ndef has_run(cell_object: CellObject):\n \"\"\" A helper used to make sure to add a run \"\"\"\n if cell_object.run is None:\n cell_object.add_run()\n\n\ndef plot(func):\n \"\"\" A decorator used to clear and resize each chart \"\"\"\n\n def wrapper(*args, **kwargs):\n if plt:\n plt.close()\n plt.clf()\n plt.cla()\n # Fix cropping of plot\n plt.rcParams['figure.constrained_layout.use'] = True\n func(*args, **kwargs)\n\n return wrapper\n\n\ndef plt_t0_b64(plt: 
matplotlib.pyplot):\n \"\"\" Matplotlib to base64 url \"\"\"\n path = Path(tempfile.mkdtemp()) / Path(\n next(tempfile._get_candidate_names()) + '.png')\n\n plt.savefig(str(path), format='png', bbox_inches='tight', figsize=(1, 1),\n dpi=DEFAULT_DPI)\n\n with open(str(path), \"rb\") as f:\n img_base64 = base64.b64encode(f.read()).decode(\"utf-8\", \"ignore\")\n b64 = f'data:image/png;base64,{img_base64}'\n\n path.unlink()\n return b64\n\n\ndef convert_plt_size(section: Section):\n \"\"\" Convert the plot size from pixels to word \"\"\"\n size_w, size_h, dpi = (SIZE_W_INCHES, SIZE_H_INCHES, DEFAULT_DPI)\n if 'dimensions' in section.layout:\n h = section.layout['dimensions']['height'] / DEFAULT_DPI\n w = section.layout['dimensions']['width'] / DEFAULT_DPI\n size_w, size_h, dpi = (w, h, DEFAULT_DPI)\n\n return size_w, size_h, dpi\n\n\ndef get_ax_location(legend_style):\n \"\"\" Get the legend location from the verticalAlign key or return default \"\"\"\n align = legend_style.get('align', None)\n vertical_align = legend_style.get('verticalAlign', None)\n\n if not align or not vertical_align:\n return 'best'\n\n vertical_align = vertical_align.replace('top', 'upper').replace(\n 'bottom', 'lower')\n return f'{vertical_align} {align}'\n\n\ndef get_current_li(extra, list_type='List Number'):\n \"\"\" Return the current list item style and indent level \"\"\"\n list_type = list_type if 'list_type' not in extra else extra['list_type']\n if not extra or 'list_level' not in extra:\n return list_type, 0, list_type\n\n extra_list_level = int(extra['list_level'])\n list_level = 0\n if extra_list_level == 0:\n list_level = 2\n p_style = list_type\n elif extra_list_level > 3:\n # The docx template doesn't support more than\n # 4 levels of indentation.\n list_level = 4\n p_style = f'{list_type} {list_level}'\n elif extra_list_level > 0:\n list_level += extra['list_level'] + 1\n p_style = f'{list_type} {list_level}'\n\n return p_style, list_level, list_type\n\n\ndef list_number(doc, par, prev=None, level=None, num=True):\n \"\"\"\n Taken from: https://github.com/python-openxml/python-docx/issues/25\n \"\"\"\n xpath_options = {\n True: {'single': 'count(w:lvl)=1 and ', 'level': 0},\n False: {'single': '', 'level': level},\n }\n\n def style_xpath(prefer_single=True):\n style = par.style.style_id\n return (\n 'w:abstractNum['\n '{single}w:lvl[@w:ilvl=\"{level}\"]/w:pStyle[@w:val=\"{style}\"]'\n ']/@w:abstractNumId'\n ).format(style=style, **xpath_options[prefer_single])\n\n def type_xpath(prefer_single=True):\n type = 'decimal' if num else 'bullet'\n return (\n 'w:abstractNum['\n '{single}w:lvl[@w:ilvl=\"{level}\"]/w:numFmt[@w:val=\"{type}\"]'\n ']/@w:abstractNumId'\n ).format(type=type, **xpath_options[prefer_single])\n\n def get_abstract_id():\n for fn in (style_xpath, type_xpath):\n for prefer_single in (True, False):\n xpath = fn(prefer_single)\n ids = numbering.xpath(xpath)\n if ids:\n return min(int(x) for x in ids)\n return 0\n\n if (prev is None or\n prev._p.pPr is None or\n prev._p.pPr.numPr is None or\n prev._p.pPr.numPr.numId is None):\n if level is None:\n level = 0\n numbering = doc.part.numbering_part.numbering_definitions._numbering\n # Compute the abstract ID first by style, then by num\n anum = get_abstract_id()\n # Set the concrete numbering based on the abstract numbering ID\n num = numbering.add_num(anum)\n # Make sure to override the abstract continuation property\n num.add_lvlOverride(ilvl=level).add_startOverride(1)\n # Extract the newly-allocated concrete numbering ID\n num = num.numId\n 
else:\n if level is None:\n level = prev._p.pPr.numPr.ilvl.val\n # Get the previous concrete numbering ID\n num = prev._p.pPr.numPr.numId.val\n par._p.get_or_add_pPr().get_or_add_numPr().get_or_add_numId().val = num\n par._p.get_or_add_pPr().get_or_add_numPr().get_or_add_ilvl().val = level\n\n\ndef remove_plot_borders(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n\n\ndef set_axis_font(ax):\n font = fm.FontProperties(family=get_chart_font(),\n size=DEFAULT_LEGEND_FONT_SIZE)\n\n ax.tick_params(axis='x', colors=DEFAULT_FONT_AXIS_COLOR)\n ax.tick_params(axis='y', colors=DEFAULT_FONT_AXIS_COLOR)\n\n for label in ax.get_xticklabels():\n label.set_fontproperties(font)\n\n for label in ax.get_yticklabels():\n label.set_fontproperties(font)\n\n\ndef set_legend_style(legend, options=None):\n plt.gcf().autofmt_xdate()\n if options:\n if 'hideLegend' in options and options['hideLegend']:\n plt.gca().legend().set_visible(False)\n return\n\n legend.get_frame().set_alpha(DEFAULT_ALPHA)\n legend.get_frame().set_linewidth(0.0)\n\n font = fm.FontProperties(family=get_chart_font(),\n size=DEFAULT_LEGEND_FONT_SIZE)\n\n for text in legend.get_texts():\n text.set_fontproperties(font)\n text.set_color(DEFAULT_FONT_COLOR)\n if 'valign' in options:\n text.set_position((0, options['valign']))\n\n\ndef change_legend_vertical_alignment(section: Section, top=0):\n section.layout[LEGEND_STYLE]['valign'] = top\n return section\n\n\ndef get_chart_font():\n names = [f.name for f in matplotlib.font_manager.fontManager.ttflist]\n\n if DEFAULT_WORD_FONT not in names:\n return DEFAULT_WORD_FONT_FALLBACK\n return DEFAULT_WORD_FONT\n\n\ndef get_formatted_date(input_date,\n layout=None) -> str:\n \"\"\" Returns the formatted date string\n input_date - date we want to format\n layout - custom formats from the sane JSONs\n\n Note: ParserError is raised and should be catched if used.\n \"\"\"\n date = arrow.now()\n\n # Use the date if supplied, and not now()\n if input_date:\n date = arrow.get(input_date)\n\n formatted_date = date.isoformat()\n\n # Use the user supplied format\n if layout and 'format' in layout:\n formatted_date = date.format(layout['format'])\n\n return formatted_date\n", "sub_path": "docker/sane-doc-reports/src/sane_doc_reports/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 8818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "re.sub", "line_number": 28, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 29, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 30, "usage_type": "call"}, {"api_name": "sane_doc_reports.domain.CellObject", "line_number": 35, "usage_type": "name"}, {"api_name": "sane_doc_reports.domain.Section", "line_number": 36, "usage_type": "name"}, {"api_name": "importlib.import_module", "line_number": 39, "usage_type": "call"}, {"api_name": "sane_doc_reports.elements.unimplemented.invoke", "line_number": 43, "usage_type": "call"}, {"api_name": "sane_doc_reports.elements.unimplemented", "line_number": 43, "usage_type": "name"}, {"api_name": "docx.oxml.OxmlElement", "line_number": 48, "usage_type": "call"}, {"api_name": "docx.text.paragraph.Paragraph", "line_number": 50, "usage_type": "call"}, {"api_name": "sane_doc_reports.domain.CellObject", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 72, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 77, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 85, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 85, "usage_type": "call"}, {"api_name": "tempfile._get_candidate_names", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_DPI", "line_number": 89, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 92, "usage_type": "call"}, {"api_name": "sane_doc_reports.domain.Section", "line_number": 99, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.SIZE_W_INCHES", "line_number": 101, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.SIZE_H_INCHES", "line_number": 101, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_DPI", "line_number": 101, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_DPI", "line_number": 103, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_DPI", "line_number": 104, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_DPI", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.font_manager.FontProperties", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.font_manager", "line_number": 212, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_LEGEND_FONT_SIZE", "line_number": 213, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_FONT_AXIS_COLOR", "line_number": 215, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_FONT_AXIS_COLOR", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_ALPHA", "line_number": 232, "usage_type": "argument"}, {"api_name": "matplotlib.font_manager.FontProperties", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.font_manager", "line_number": 235, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_LEGEND_FONT_SIZE", "line_number": 236, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_FONT_COLOR", "line_number": 240, "usage_type": "argument"}, {"api_name": "sane_doc_reports.domain.Section", "line_number": 245, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.LEGEND_STYLE", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.font_manager", "line_number": 251, "usage_type": "attribute"}, {"api_name": 
"sane_doc_reports.conf.DEFAULT_WORD_FONT", "line_number": 253, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_WORD_FONT_FALLBACK", "line_number": 254, "usage_type": "name"}, {"api_name": "sane_doc_reports.conf.DEFAULT_WORD_FONT", "line_number": 255, "usage_type": "name"}, {"api_name": "arrow.now", "line_number": 266, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 270, "usage_type": "call"}]} +{"seq_id": "220771610", "text": "import googlemaps\nimport pandas as pd \nimport time \n\ndef miles_to_meters(miles): \n try:\n return miles * 1_609.344\n except:\n return 0\n\nAPI_KEY = open('API_KEY.txt', 'r').read()\nmap_client = googlemaps.CLient(API_KEY)\n\nlocation = (37.785970, -122.429051)\nsearch_string = 'ramen'\ndistance = miles_to_meters(15)\nbusiness_list = []\n\nresponse = map_client.places_nearby(\n location=location,\n keyword=search_string,\n name='ramen shop',\n radius=distance\n)\n\nbusiness_list.extend(response.get('results'))\nnext_page_token = response.get('next_page_token')\n\nwhile next_page_token:\n time.sleep(2)\n response = map_client.places_nearby(\n location=location,\n keyword=search_string,\n name='ramen shop',\n radius=distance,\n page_token=next_page_token\n)\nbusiness_list.extend(response.get('results'))\nnext_page_token = response.get('next_page_token')\n\ndf = pd.DataFrame(business_list)\ndf['url'] = 'https://www.google.com/maps/place/?q=place_id:' + df['place_id']\ndf.to_sheets('python ramen search results.sheets', index=False)\n\n\n", "sub_path": "mapsAPI.py/searchBusinessesAPI.py", "file_name": "searchBusinessesAPI.py", "file_ext": "py", "file_size_in_byte": 1061, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "googlemaps.CLient", "line_number": 12, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "7313505", "text": "# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport re\n\nfrom pants.util.dirutil import read_file\nfrom pants_test.pants_run_integration_test import PantsRunIntegrationTest\n\n\nTEST_DIR = 'testprojects/src/scala/org/pantsbuild/testproject'\n\n\nclass ScalaFixIntegrationTest(PantsRunIntegrationTest):\n\n @classmethod\n def hermetic(cls):\n return True\n\n def test_scalafix_fail(self):\n\n rules = {'rules': 'ProcedureSyntax'}\n options = {\n 'lint.scalafix': rules,\n 'fmt.scalafix': rules,\n 'lint.scalastyle': {'skip': True}\n }\n\n target = f'{TEST_DIR}/procedure_syntax'\n # lint should fail because the rule has an impact.\n failing_test = self.run_pants(['lint', target], options)\n self.assert_failure(failing_test)\n\n def test_scalafix_disabled(self):\n\n rules = {'rules': 'ProcedureSyntax'}\n options = {\n 'lint.scalafix': rules,\n 'fmt.scalafix': rules,\n 'lint.scalastyle': {'skip': True}\n }\n\n # take a snapshot of the file which we can write out\n # after the test finishes executing.\n test_file_name = f'{TEST_DIR}/procedure_syntax/ProcedureSyntax.scala'\n\n with self.with_overwritten_file_content(test_file_name):\n # format an incorrectly formatted file.\n target = f'{TEST_DIR}/procedure_syntax'\n fmt_result = self.run_pants(['fmt', target], options)\n self.assert_success(fmt_result)\n\n # verify that the lint check passes.\n test_fix = self.run_pants(['lint', target], options)\n self.assert_success(test_fix)\n\n 
def test_scalafix_scalacoptions(self):\n\n rules = {\n 'rules': 'RemoveUnused',\n 'semantic': True\n }\n options = {\n 'scala': {\n 'scalac_plugin_dep': f'{TEST_DIR}/rsc_compat:semanticdb-scalac',\n 'scalac_plugins': '+[\"semanticdb\"]'\n },\n 'compile.rsc': {'args': '+[\"-S-Ywarn-unused\"]'},\n 'lint.scalafix': rules,\n 'fmt.scalafix': rules,\n 'lint.scalastyle': {'skip': True}\n }\n\n test_file_name = f'{TEST_DIR}/rsc_compat/RscCompat.scala'\n\n with self.with_overwritten_file_content(test_file_name):\n # format an incorrectly formatted file.\n target = f'{TEST_DIR}/rsc_compat'\n fmt_result = self.run_pants(['fmt', target], options)\n self.assert_success(fmt_result)\n\n # verify that the lint check passes.\n test_fix = self.run_pants(['lint', target], options)\n self.assert_success(test_fix)\n\n def test_rsccompat_fmt(self):\n options = {\n 'scala': {\n 'scalac_plugin_dep': f'{TEST_DIR}/rsc_compat:semanticdb-scalac',\n 'scalac_plugins': '+[\"semanticdb\"]'\n },\n 'fmt.scalafix': {\n 'rules': 'scala:rsc.rules.RscCompat',\n 'semantic': True,\n 'transitive': True,\n 'scalafix_tool_classpath': f'{TEST_DIR}/rsc_compat:rsc-compat',\n },\n 'lint.scalafix': {'skip': True},\n 'lint.scalastyle': {'skip': True},\n }\n \n test_file_name = f'{TEST_DIR}/rsc_compat/RscCompat.scala'\n fixed_file_name = f'{TEST_DIR}/rsc_compat/RscCompatFixed.scala'\n\n with self.with_overwritten_file_content(test_file_name):\n # format an incorrectly formatted file.\n target = f'{TEST_DIR}/rsc_compat'\n fmt_result = self.run_pants(['fmt', target], options)\n self.assert_success(fmt_result)\n\n result = read_file(test_file_name)\n result = re.sub(re.escape('object RscCompat {'), 'object RscCompatFixed {', result)\n expected = read_file(fixed_file_name)\n self.assertEqual(result, expected)\n", "sub_path": "tests/python/pants_test/backend/jvm/tasks/test_scalafix.py", "file_name": "test_scalafix.py", "file_ext": "py", "file_size_in_byte": 3559, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pants_test.pants_run_integration_test.PantsRunIntegrationTest", "line_number": 13, "usage_type": "name"}, {"api_name": "pants.util.dirutil.read_file", "line_number": 110, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 111, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 111, "usage_type": "call"}, {"api_name": "pants.util.dirutil.read_file", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "257541886", "text": "from typing import List\nclass Solution:\n def maxArea(self,a:List[int]) -> int:\n i = 0 # first element index\n j = len(a)-1 # last element index\n m = 0 # set max to zero\n while ( i < j ): # keep shrinking the window while i < j\n m = max(m,min(a[i],a[j])*(j-i))# example : max(0,min(1,7)*(8-0)) => max(0,8) => 8 = m \n if ( a[i] < a[j] ): # a[i] = 1 < a[j] = 7: the shorter line limits the area, so advance it\n i += 1 # move the left pointer inward; next candidate is a[1] = 8\n else:\n j -= 1\n return m\nif __name__ == \"__main__\":\n height = [1,8,6,2,5,4,8,3,7]\n s = Solution()\n print(s.maxArea(height))\n", "sub_path": "container_with_most_water/solve2.py", "file_name": "solve2.py", "file_ext": "py", "file_size_in_byte": 562, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "typing.List", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "322018170", "text": "from elasticsearch import Elasticsearch\n \ndata=\"\"\nes = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n\n\nwhile True:\n count=0\n query = input(\"Enter 
question\\n\")\n query.replace(\"what\",\"\")\n query.replace(\"why\",\"\")\n response = es.search(index=\"_all\", body={\"query\": {\"match\": {'text':query} },\n \"indices_boost\" : { \"qna\" : 1.2 }}\n )['hits']['hits']#[0]['_source']['text'])\n finalresp=\"\"\n prefix=\"\"\n for resp in response:\n print(resp)\n count=count+1\n break\n '''\n if query.lower() == resp['_source']['text'][0:len(query)].lower():\n if len(query.split())<4:\n if query in resp['_source']['text'].lower():\n prefix = resp['_source']['text'].lstrip() +\" \"\n \" \".join(prefix.split()) \n else:\n prefix = resp['_source']['text'].lstrip() +\" \"\n \" \".join(prefix.split())\n else:\n if len(query.split())<4:\n if query in resp['_source']['text'].lower():\n finalresp = finalresp +\" \"+ resp['_source']['text']\n \" \".join(finalresp.split()) \n else:\n finalresp = finalresp +\" \"+ resp['_source']['text']\n \" \".join(finalresp.split()) \n '''\n", "sub_path": "testkb.py", "file_name": "testkb.py", "file_ext": "py", "file_size_in_byte": 1335, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "elasticsearch.Elasticsearch", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "3104029", "text": "import jieba\nimport codecs\nimport sys\nimport pandas\nimport numpy as np\nimport imageio\nfrom wordcloud import WordCloud, ImageColorGenerator\nfrom os import listdir\nfrom os.path import isfile, join\nfrom Love.settings import BASE_DIR\nimport os\n\n\nstopwords_filename = os.path.join(BASE_DIR,'LoveAPP/mwordcloud/data/stopwords.txt' )\nfont_filename = os.path.join(BASE_DIR,'LoveAPP/mwordcloud/fonts/STFangSong.ttf' )\ntemplate_dir = os.path.join(BASE_DIR,'LoveAPP/mwordcloud/data/templates/' )\n\ndef create_word_cloud(content_lines,des_dir,csv_flag):\n content = '\\n'.join([line.strip() for line in content_lines if len(line.strip()) > 0 and line[0] != '<' and line[0] != '\\\"' and line[0] != \"\t\"])\n stopwords = set([line.strip() for line in codecs.open(stopwords_filename, 'r', 'utf-8')])\n\n segs = jieba.cut(content)\n words = []\n for seg in segs:\n word = seg.strip().lower()\n if len(word) > 1 and word not in stopwords:\n words.append(word)\n\n words_df = pandas.DataFrame({'word': words})\n words_stat = words_df.groupby(by=['word'])['word'].agg(np.size)\n words_stat = words_stat.to_frame()\n words_stat.columns = ['number']\n words_stat = words_stat.reset_index().sort_values(by=\"number\", ascending=False)\n\n print('# of different words =', len(words_stat))\n img_list= []\n\n prefix_time = csv_flag\n for file in listdir(template_dir):\n if file[-4:] != '.png' and file[-4:] != '.jpg':\n continue\n background_picture_filename = join(template_dir, file)\n if isfile(background_picture_filename):\n prefix = str(file.split('.')[0])\n\n bimg = imageio.imread(background_picture_filename)\n wordcloud = WordCloud(font_path=font_filename, background_color='white',\n mask=bimg, max_font_size=600, random_state=100)\n wordcloud = wordcloud.fit_words(\n dict(words_stat.head(100).itertuples(index=False)))\n\n bimgColors = ImageColorGenerator(bimg)\n wordcloud.recolor(color_func=bimgColors)\n file_name = prefix_time + \"_\" + prefix + '.png'\n output_filename = os.path.join(des_dir, file_name)\n img_list.append(file_name)\n\n print('Saving', output_filename)\n wordcloud.to_file(output_filename)\n\n top_words_file = os.path.join(des_dir,prefix_time + \"_top_words_tab.txt\")\n top_word = open(top_words_file,'w')\n tmp = words_stat.head(len(words_stat))\n # 
print(tmp)\n # print(tmp.shape)\n # print(tmp.iloc[0])\n # print(tmp.iloc[0][0])\n top_10 = []\n for i in range(len(words_stat)):\n top_word.write(\"{}\\t{}\\n\".format(str(tmp.iloc[i][0]),str(tmp.iloc[i][1])))\n if i<10:\n top_10.append(str(tmp.iloc[i][0]))\n # print(words_stat.head(10))\n return img_list,top_10\n", "sub_path": "LoveAPP/mwordcloud/create_word_cloud.py", "file_name": "create_word_cloud.py", "file_ext": "py", "file_size_in_byte": 2836, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "Love.settings.BASE_DIR", "line_number": 14, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "Love.settings.BASE_DIR", "line_number": 15, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "Love.settings.BASE_DIR", "line_number": 16, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 20, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 43, "usage_type": "call"}, {"api_name": "imageio.imread", "line_number": 46, "usage_type": "call"}, {"api_name": "wordcloud.WordCloud", "line_number": 47, "usage_type": "call"}, {"api_name": "wordcloud.fit_words", "line_number": 49, "usage_type": "call"}, {"api_name": "wordcloud.ImageColorGenerator", "line_number": 52, "usage_type": "call"}, {"api_name": "wordcloud.recolor", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "wordcloud.to_file", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "516825823", "text": "import os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\nfrom log_writer import write_parameters, write_batch\n\n\nimport numpy as np\nimport pickle\nimport torch\nfrom torch import nn\nfrom torch.utils.data.dataloader import DataLoader\n\nimport configurations\nfrom t4sa_dataset import T4saDataset\nfrom text_analyzer.model import LSTMModel\nfrom vocab import Vocab\n\n\ndef get_vocabulary(refresh=False):\n vocab_path = os.path.join(\"vocabulary.pickle\")\n if os.path.exists(vocab_path) and refresh:\n os.remove(vocab_path)\n if os.path.exists(vocab_path):\n with open(vocab_path, \"rb\") as f_obj:\n return pickle.load(f_obj)\n\n train_dataset = T4saDataset(train=True, configs=configurations, load_image=False)\n texts = [item[\"description\"] for item in train_dataset]\n vocab = Vocab(texts)\n with open(vocab_path, \"wb\") as f_obj:\n pickle.dump(vocab, f_obj)\n return vocab\n\n\ndef indices_to_one_hot(data, nb_classes):\n \"\"\"Convert an iterable of indices to one-hot encoded 
labels.\"\"\"\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]\n\n\ndef get_train_loader():\n train_dataset = T4saDataset(train=True, configs=configurations, load_image=False, limit=configurations.training_size)\n return DataLoader(dataset=train_dataset,\n batch_size=configurations.batch_size,\n shuffle=True)\n\n\ndef get_test_loader():\n test_dataset = T4saDataset(train=False, configs=configurations, load_image=False, limit=configurations.eval_size)\n return DataLoader(dataset=test_dataset,\n batch_size=configurations.batch_size,\n shuffle=True)\n\n\ndef _train(net, vocab, train_loader, criterion, optimizer, epochs):\n net.train()\n # train for some number of epochs\n for e in range(epochs):\n step = 0\n # initialize hidden state\n\n # batch loop\n for data in train_loader:\n step += 1\n optimizer.zero_grad()\n\n inputs, labels = data[\"description\"], data[\"classification\"]\n labels = torch.from_numpy(indices_to_one_hot(labels, configurations.output_size)).type(torch.float).to(\n configurations.DEVICE)\n inputs = torch.from_numpy(vocab.encode(data[\"description\"], configurations.seq_length)).to(\n configurations.DEVICE)\n\n # zero accumulated gradients\n net.zero_grad()\n output = net.forward(inputs)\n loss = criterion(output, labels)\n loss.backward()\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n nn.utils.clip_grad_norm_(net.parameters(), configurations.clip)\n optimizer.step()\n\n yield (e, step)\n\n\ndef _evaluate(net, vocab, test_loader, criterion, epoch, step):\n # Get validation loss\n val_losses = []\n correct = 0\n total = 0\n net.eval()\n for data in test_loader:\n inputs, labels = data[\"description\"], data[\"classification\"]\n hotspot_labels = torch.from_numpy(indices_to_one_hot(labels, configurations.output_size))\n hotspot_labels = torch.tensor(hotspot_labels, dtype=torch.float)\n inputs = torch.from_numpy(vocab.encode(data[\"description\"], configurations.seq_length))\n inputs, hotspot_labels = inputs.to(configurations.DEVICE), hotspot_labels.to(configurations.DEVICE)\n\n output = net.forward(inputs)\n loss = criterion(output, hotspot_labels)\n val_loss = criterion(output, hotspot_labels)\n val_losses.append(val_loss.item())\n\n _, predicted = torch.max(output.data, 1)\n\n # Total number of labels\n total += labels.size(0)\n\n labels = labels.to(configurations.DEVICE)\n\n # Total correct predictions\n correct += (predicted == labels).sum()\n\n accuracy = 100 * correct / total\n net.train()\n write_batch(model_name=\"rnn\", epoch=epoch, batch=step, accuracy=accuracy.item(), loss=loss.item())\n print(\"Loss: {:.6f}...\".format(loss.item()),\n \"Val Loss: {:.6f}\".format(np.mean(val_losses)),\n \"Accuracy : {:.6f}\".format(accuracy))\n\n\ndef train_and_evaluate():\n hidden_dim = 128\n layer_dim = 3\n lr = 0.01\n\n train_loader = get_train_loader()\n test_loader = get_test_loader()\n vocab = get_vocabulary()\n vocab_size = len(vocab.vocab) + 1 # +1 for the 0 padding\n write_parameters(\"rnn\", {\"image_size\": configurations.image_size,\n \"batch_size\": configurations.batch_size,\n \"training_size\": configurations.training_size,\n \"eval_size\": configurations.eval_size,\n \"epochs\": configurations.epochs,\n \"output_size\": configurations.output_size,\n \"hidden_dim\": hidden_dim,\n \"embedding_dim\": configurations.embedding_dim,\n \"vocab_size\": vocab_size,\n \"layer_dim\": layer_dim})\n net = LSTMModel(vocab_size, configurations.embedding_dim, hidden_dim, layer_dim, 
configurations.output_size).to(configurations.DEVICE)\n criterion = nn.BCELoss()\n optimizer = torch.optim.Adam(net.parameters(), lr=lr)\n print_every = 5\n epochs = configurations.epochs\n\n for (epoch, step) in _train(net, vocab, train_loader, criterion, optimizer, epochs):\n if step % print_every == 0:\n print(\"Epoch: {}/{}...\".format(epoch + 1, epochs),\n \"Step: {}...\".format(step))\n _evaluate(net, vocab, test_loader, criterion, epoch, step)\n\n return net\n\n\ndef run_t(net):\n criterion = nn.BCELoss()\n vocab = get_vocabulary()\n test_loader = get_test_loader()\n val_losses = []\n correct = 0\n total = 0\n net.eval()\n for data in test_loader:\n inputs, labels = data[\"description\"], data[\"classification\"]\n hotspot_labels = torch.from_numpy(indices_to_one_hot(labels, configurations.output_size))\n hotspot_labels = torch.tensor(hotspot_labels, dtype=torch.float)\n inputs = torch.from_numpy(vocab.encode(data[\"description\"], configurations.seq_length))\n inputs, hotspot_labels = inputs.to(configurations.DEVICE), hotspot_labels.to(configurations.DEVICE)\n\n output = net.forward(inputs)\n loss = criterion(output, hotspot_labels)\n val_loss = criterion(output, hotspot_labels)\n val_losses.append(val_loss.item())\n\n _, predicted = torch.max(output.data, 1)\n\n # Total number of labels\n total += labels.size(0)\n\n labels = labels.to(configurations.DEVICE)\n\n # Total correct predictions\n correct += (predicted == labels).sum()\n\n accuracy = 100 * correct / total\n print(\"Loss: {:.6f}...\".format(loss.item()),\n \"Val Loss: {:.6f}\".format(np.mean(val_losses)),\n \"Accuracy : {:.6f}\".format(accuracy))\n\n\ndef get_model():\n path = os.path.join(os.path.dirname(__file__), \"trained_sentence_sentiment_model\")\n if os.path.exists(path):\n return torch.load(path)\n model = train_and_evaluate()\n torch.save(model, path)\n return model\n\n\nnet = get_model()\nrun_t(net)", "sub_path": "text_analyzer/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 7280, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 26, "usage_type": "call"}, {"api_name": "t4sa_dataset.T4saDataset", "line_number": 28, "usage_type": "call"}, {"api_name": "vocab.Vocab", "line_number": 30, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 39, "usage_type": "call"}, {"api_name": "t4sa_dataset.T4saDataset", "line_number": 43, "usage_type": "call"}, {"api_name": "configurations.training_size", "line_number": 43, "usage_type": "attribute"}, {"api_name": 
"torch.utils.data.dataloader.DataLoader", "line_number": 44, "usage_type": "call"}, {"api_name": "configurations.batch_size", "line_number": 45, "usage_type": "attribute"}, {"api_name": "t4sa_dataset.T4saDataset", "line_number": 50, "usage_type": "call"}, {"api_name": "configurations.eval_size", "line_number": 50, "usage_type": "attribute"}, {"api_name": "torch.utils.data.dataloader.DataLoader", "line_number": 51, "usage_type": "call"}, {"api_name": "configurations.batch_size", "line_number": 52, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 69, "usage_type": "call"}, {"api_name": "configurations.output_size", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.float", "line_number": 69, "usage_type": "attribute"}, {"api_name": "configurations.DEVICE", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 71, "usage_type": "call"}, {"api_name": "vocab.encode", "line_number": 71, "usage_type": "call"}, {"api_name": "configurations.seq_length", "line_number": 71, "usage_type": "attribute"}, {"api_name": "configurations.DEVICE", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.utils", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "configurations.clip", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 94, "usage_type": "call"}, {"api_name": "configurations.output_size", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 95, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 96, "usage_type": "call"}, {"api_name": "vocab.encode", "line_number": 96, "usage_type": "call"}, {"api_name": "configurations.seq_length", "line_number": 96, "usage_type": "attribute"}, {"api_name": "configurations.DEVICE", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 104, "usage_type": "call"}, {"api_name": "configurations.DEVICE", "line_number": 109, "usage_type": "attribute"}, {"api_name": "log_writer.write_batch", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 118, "usage_type": "call"}, {"api_name": "vocab.vocab", "line_number": 130, "usage_type": "attribute"}, {"api_name": "log_writer.write_parameters", "line_number": 131, "usage_type": "call"}, {"api_name": "configurations.image_size", "line_number": 131, "usage_type": "attribute"}, {"api_name": "configurations.batch_size", "line_number": 132, "usage_type": "attribute"}, {"api_name": "configurations.training_size", "line_number": 133, "usage_type": "attribute"}, {"api_name": "configurations.eval_size", "line_number": 134, "usage_type": "attribute"}, {"api_name": "configurations.epochs", "line_number": 135, "usage_type": "attribute"}, {"api_name": "configurations.output_size", "line_number": 136, "usage_type": "attribute"}, {"api_name": "configurations.embedding_dim", "line_number": 138, "usage_type": "attribute"}, {"api_name": "text_analyzer.model.LSTMModel", "line_number": 141, "usage_type": "call"}, {"api_name": "configurations.embedding_dim", "line_number": 141, "usage_type": "attribute"}, {"api_name": "configurations.output_size", "line_number": 141, "usage_type": "attribute"}, {"api_name": "configurations.DEVICE", "line_number": 
141, "usage_type": "attribute"}, {"api_name": "torch.nn.BCELoss", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 142, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 143, "usage_type": "attribute"}, {"api_name": "configurations.epochs", "line_number": 145, "usage_type": "attribute"}, {"api_name": "torch.nn.BCELoss", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 157, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 166, "usage_type": "call"}, {"api_name": "configurations.output_size", "line_number": 166, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 167, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 168, "usage_type": "call"}, {"api_name": "vocab.encode", "line_number": 168, "usage_type": "call"}, {"api_name": "configurations.seq_length", "line_number": 168, "usage_type": "attribute"}, {"api_name": "configurations.DEVICE", "line_number": 169, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 176, "usage_type": "call"}, {"api_name": "configurations.DEVICE", "line_number": 181, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 197, "usage_type": "call"}]} +{"seq_id": "278095285", "text": "import io, sys, os, pytest, re\npath = os.path.dirname(os.path.abspath(__file__))+'/app.py'\n\n\n@pytest.mark.it(\"Max integer from the list\")\ndef test_output(capsys, app):\n app()\n captured = capsys.readouterr()\n assert \"5435\\n\" in captured.out\n\n@pytest.mark.it(\"Use the for loop\")\ndef test_for_loop():\n with open(path, 'r') as content_file:\n content = content_file.read()\n regex = re.compile(r\"for(\\s)*\")\n assert bool(regex.search(content)) == True\n\n@pytest.mark.it(\"Use if statement\")\ndef test_if():\n with open(path, 'r') as content_file:\n content = content_file.read()\n regex = re.compile(r\"if(\\s)*\")\n assert bool(regex.search(content)) == True", "sub_path": "exercises/09-Max_integer_from_list/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 699, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.dirname", "line_number": 2, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 2, "usage_type": "call"}, {"api_name": "pytest.mark.it", "line_number": 5, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 5, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 15, "usage_type": "call"}, {"api_name": "pytest.mark.it", "line_number": 11, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 22, "usage_type": "call"}, {"api_name": 
"pytest.mark.it", "line_number": 18, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 18, "usage_type": "attribute"}]} +{"seq_id": "467015884", "text": "# Copyright (C) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n#\n\nimport numpy as np\nimport torch\nfrom mmcv.parallel import collate, scatter\n\nfrom ..datasets.pipelines import Compose\n\n\ndef get_fake_data(orig_img_shape, stream_sample_frames):\n data = {}\n data['clip_len'] = stream_sample_frames.clip_len\n data['num_clips'] = stream_sample_frames.num_clips\n data['imgs'] = [np.zeros(orig_img_shape, dtype=np.uint8), ] * data['clip_len']\n data['modality'] = 'RGB'\n\n return data\n\n\ndef get_fake_input(cfg, orig_img_shape=(128, 128, 3), device='cuda'):\n test_pipeline = cfg.data.test.pipeline[2:]\n test_pipeline = Compose(test_pipeline)\n data = get_fake_data(orig_img_shape, stream_sample_frames=cfg.data.test.pipeline[0])\n data = test_pipeline(data)\n data = scatter(collate([data], samples_per_gpu=1), [device])[0]\n return data\n", "sub_path": "mmaction/apis/fake_input.py", "file_name": "fake_input.py", "file_ext": "py", "file_size_in_byte": 1377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 27, "usage_type": "attribute"}, {"api_name": "datasets.pipelines.Compose", "line_number": 35, "usage_type": "call"}, {"api_name": "mmcv.parallel.scatter", "line_number": 38, "usage_type": "call"}, {"api_name": "mmcv.parallel.collate", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "503090588", "text": "#!/usr/bin/env python3\n\nimport http.client\nimport json\nimport os\nimport sys\nimport re\nimport yaml\n\n\ndef parse_config_file():\n config = {}\n scriptbasename = __file__\n scriptbasename = os.path.basename(scriptbasename)\n scriptbasename = scriptbasename.replace('.py', '')\n config_file = os.path.join(os.path.dirname(\n __file__), '%s.yml' % scriptbasename)\n\n if os.path.isfile(config_file):\n with open(config_file) as f:\n try:\n config = yaml.safe_load(f.read())\n except Exception as exc:\n sys.exit(\"Error: parsing %s - %s\" % (config_file, str(exc)))\n else:\n sys.exit(\"Error: config file does not exist - \" + config_file)\n return config\n\n\ndef get_token_from_api(config):\n conn = http.client.HTTPSConnection(\"iam.cloud.ibm.com\")\n payload = 'grant_type=urn%3Aibm%3Aparams%3Aoauth%3Agrant-type%3Aapikey&apikey=' + \\\n config['ibm_cloud_api_key']\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'application/json',\n 'Cache-Control': 'no-cache'\n }\n\n try:\n conn.request(\"POST\", \"/identity/token\", payload, headers)\n res = conn.getresponse().read()\n data = res.decode(\"utf-8\")\n json_res = json.loads(data)\n return json_res['token_type'] + ' ' + json_res['access_token']\n except Exception as error:\n print(f\"Error getting token. 
{error}\")\n raise\n\n\ndef get_instances(config):\n conn = http.client.HTTPSConnection(\n config['ibm_cloud_region'] + \".iaas.cloud.ibm.com\")\n headers = {\n 'Content-Type': 'application/json',\n 'Cache-Control': 'no-cache',\n 'Accept': 'application/json',\n 'Authorization': config['token'],\n 'cache-control': 'no-cache'\n }\n version = \"2019-05-31\"\n payload = \"\"\n try:\n instances = []\n page_url = \"/v1/instances?generation=1&limit=1&version=\" + version\n while True:\n conn.request(\"GET\", page_url, payload, headers)\n res = conn.getresponse()\n data = res.read()\n page_data = json.loads(data.decode(\"utf-8\"))\n instances = instances + page_data['instances']\n print(json.dumps(page_data, indent=2, sort_keys=True))\n if page_data.get('next'):\n page_url = page_data['next']['href']\n else:\n break\n\n print(json.dumps(instances, indent=2, sort_keys=True))\n return instances\n except Exception as error:\n print(f\"Error fetching instances. {error}\")\n raise\n\n\ndef build_inventory(instances):\n inventory = {}\n inventory['_meta'] = {}\n inventory['_meta']['hostvars'] = {}\n inventory['all'] = {}\n inventory['all']['hosts'] = []\n\n for d in instances:\n inventory['_meta']['hostvars'][d['name']] = {}\n inventory['_meta']['hostvars'][d['name']\n ]['ansible_host'] = d['primary_network_interface']['primary_ipv4_address']\n inventory['all']['hosts'].append(d['name'])\n\n return inventory\n\n\nif len(sys.argv) == 2 and sys.argv[1] == '--list':\n config = parse_config_file()\n config['token'] = get_token_from_api(config)\n instances = get_instances(config)\n inventory = build_inventory(instances)\n print(json.dumps(inventory, indent=2, sort_keys=True))\nelif len(sys.argv) == 3 and sys.argv[1] == '--host':\n print(json.dumps({'ansible_connection': 'ssh'}))\nelse:\n sys.stderr.write(\"Need an argument, either --list or --host \\n\")\n\nexit()\n", "sub_path": "Ansible_Tower/Playbook_Examples/IBM_CLOUD/inventory/ibm_cloud.py", "file_name": "ibm_cloud.py", "file_ext": "py", "file_size_in_byte": 3563, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.basename", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 26, "usage_type": "call"}, {"api_name": "http.client.client.HTTPSConnection", "line_number": 31, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 31, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 31, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "http.client.client.HTTPSConnection", "line_number": 52, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 52, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 52, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 70, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 72, "usage_type": "call"}, 
{"api_name": "json.dumps", "line_number": 78, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 101, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 106, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 107, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 108, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 110, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 110, "usage_type": "attribute"}]} +{"seq_id": "448922229", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import root_scalar,minimize\nimport scipy.integrate as integrate\nimport matplotlib.gridspec as gridspec\n\n\nfig=plt.figure()\ngs=gridspec.GridSpec(1,2,width_ratios=[1,1],height_ratios=[1])\ngs.update(wspace=0.4)\n\nax1=plt.subplot(gs[1])\nax2=plt.subplot(gs[0])\n\n\ndef integrand(r,epsilon,q,alpha,alphaprime):\n\ty = 1/epsilon + (1/epsilon)*(1+(q-1)*np.sin(alpha*r)**2)*(1+(1/q - 1)*np.sin(alphaprime*r)**2)\n\ty += epsilon**2 * (1+(q-1)*np.cos(alpha*r)**2)*(1+(1/q-1)*np.cos(alphaprime*r)**2)\n\ty+= 2*(epsilon**(1/2))*(2-q-1/q) * np.sin(alpha*r)*np.cos(alpha*r)*np.sin(alphaprime*r)*np.cos(alphaprime*r)\n\n\treturn y*r\n\ndef integral(alphaprime,epsilon,q,alpha,R):\n\tI = integrate.quad(integrand,0,R,args=(epsilon,q,alpha,alphaprime))[0] \n\treturn I/(R**2)\n\n#Figure 5:\n\nalpha = 0.07 / 25\n\nqlist=[1.5]\nepsilonlist=np.linspace(1,2,num=1000)\nfor i in qlist:\n\txlist=[]\n\tfor j in range(len(epsilonlist)):\n\t\tR = 25/np.sqrt(epsilonlist[j])\n\t\tx0=alpha\n\t\txlist.append(minimize(integral,x0,args=(epsilonlist[j],i,alpha,R)).x)\n\tax2.plot(epsilonlist,xlist)\n\n\n\ndef stressintegrand(r,epsilon,q,alpha,alphaprime):\n\ty = -1/(epsilon**2) + (-1/(epsilon**2))*(1+(q-1)*np.sin(alpha*r)**2)*(1+(1/q - 1)*np.sin(alphaprime*r)**2)\n\ty += 2*epsilon * (1+(q-1)*np.cos(alpha*r)**2)*(1+(1/q-1)*np.cos(alphaprime*r)**2)\n\ty+= (epsilon**(-1/2))*(2-q-1/q) * np.sin(alpha*r)*np.cos(alpha*r)*np.sin(alphaprime*r)*np.cos(alphaprime*r)\n\n\treturn y*r\n\ndef stressintegral(alphaprime,epsilon,q,alpha,R):\n\tI = integrate.quad(stressintegrand,0,R,args=(epsilon,q,alpha,alphaprime))[0] \n\treturn I/(R**2)\n\ndef sigma(alphaprime,epsilon,q,alpha,R):\n\ty = (1/epsilon) * integral(alphaprime,epsilon,q,alpha,R)\n\ty += (2*epsilon/(R**2)) * ( stressintegral(alphaprime,epsilon,q,alpha,R) - integrand(R,epsilon,q,alpha,alphaprime)*R/(2*epsilon**(3/2)))\n\n\treturn y\t\n\n#Figure 6:\n\nalpha = 0.07 / 25\nqlist=[1.5]\nepsilonlist=np.linspace(1,1.5,num=1000)\nfor j in qlist:\n\tsigmalist=[]\n\tfor i in range(len(epsilonlist)):\n\t\tR = 25/np.sqrt(epsilonlist[i])\n\t\tx0=alpha\n\t\tsigmalist.append(100*stressintegral(minimize(integral,x0,args=(epsilonlist[i],j,alpha,R)).x,epsilonlist[i],j,alpha,R))\n\n\tax1.plot(epsilonlist,sigmalist)\n\n\n\nax1.set_xlabel('$\\epsilon$ (Longitudinal Strain)')\nax1.set_ylabel('$\\sigma_{zz}$ (Longitudinal Stress) (MPa)')\nax1.set_xlim(1,)\nax1.set_ylim(0,)\n\nax2.set_xlabel('$\\epsilon$ (Longitudinal Strain)')\nax2.set_ylabel('$\\\\alpha^*$ (rad/nm)')\nax2.set_xlim(1,2)\n\nplt.show()", "sub_path": "Scripts/Old/FigureMaker.py", "file_name": "FigureMaker.py", "file_ext": "py", "file_size_in_byte": 2478, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": 
"matplotlib.gridspec.GridSpec", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.sin", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.integrate.quad", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 36, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 46, "usage_type": "call"}, {"api_name": "scipy.integrate.quad", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 68, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}]} +{"seq_id": "546292016", "text": "\"\"\"The command for archiving a habit.\"\"\"\nfrom dataclasses import dataclass\n\nfrom jupiter.core.domain.habits.service.archive_service import HabitArchiveService\nfrom jupiter.core.framework.base.entity_id import EntityId\nfrom jupiter.core.framework.event import EventSource\nfrom jupiter.core.framework.use_case import (\n ContextProgressReporter,\n UseCaseArgsBase,\n)\nfrom jupiter.core.use_cases.infra.use_cases import (\n AppLoggedInMutationUseCase,\n AppLoggedInUseCaseContext,\n)\n\n\n@dataclass\nclass HabitArchiveArgs(UseCaseArgsBase):\n \"\"\"PersonFindArgs.\"\"\"\n\n ref_id: EntityId\n\n\nclass HabitArchiveUseCase(AppLoggedInMutationUseCase[HabitArchiveArgs, None]):\n \"\"\"The command for archiving a habit.\"\"\"\n\n async def _execute(\n self,\n progress_reporter: ContextProgressReporter,\n context: AppLoggedInUseCaseContext,\n args: HabitArchiveArgs,\n ) -> None:\n \"\"\"Execute the command's action.\"\"\"\n async with self._storage_engine.get_unit_of_work() as uow:\n habit = await uow.habit_repository.load_by_id(args.ref_id)\n await HabitArchiveService(\n EventSource.CLI,\n self._time_provider,\n self._storage_engine,\n ).do_it(progress_reporter, habit)\n", "sub_path": "src/core/jupiter/core/use_cases/habits/archive.py", "file_name": "archive.py", "file_ext": "py", "file_size_in_byte": 1252, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "jupiter.core.framework.use_case.UseCaseArgsBase", "line_number": 18, "usage_type": "name"}, {"api_name": 
"jupiter.core.framework.base.entity_id.EntityId", "line_number": 21, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 17, "usage_type": "name"}, {"api_name": "jupiter.core.use_cases.infra.use_cases.AppLoggedInMutationUseCase", "line_number": 24, "usage_type": "name"}, {"api_name": "jupiter.core.framework.use_case.ContextProgressReporter", "line_number": 29, "usage_type": "name"}, {"api_name": "jupiter.core.use_cases.infra.use_cases.AppLoggedInUseCaseContext", "line_number": 30, "usage_type": "name"}, {"api_name": "jupiter.core.domain.habits.service.archive_service.HabitArchiveService", "line_number": 36, "usage_type": "call"}, {"api_name": "jupiter.core.framework.event.EventSource.CLI", "line_number": 37, "usage_type": "attribute"}, {"api_name": "jupiter.core.framework.event.EventSource", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "246824395", "text": "import matplotlib.pyplot as plt\nimport numpy as np\ndata = np.loadtxt(\"cs8_100000_40\")\nexp_data_PHENIX = np.loadtxt(\"cross_section_pi0_exp_PHENIX_2007.txt\")\ncomp_data = np.loadtxt(\"cross_section_pi0.txt\")\n\nplt.figure()\nplt.errorbar(data.T[0], data.T[1]/(2*np.pi), yerr=data.T[2]/(2*np.pi), fmt='o', markersize=1, label='Pythia', capsize=2, elinewidth=0.5)\nplt.yscale(\"log\")\nplt.xlim = (2, 20)\nerr2 = np.sqrt(exp_data_PHENIX.T[2]**2+exp_data_PHENIX.T[3]**2) \nplt.errorbar(exp_data_PHENIX.T[0], exp_data_PHENIX.T[1], yerr=err2, fmt='o', markersize=1, label='PHENIX(2007)', capsize=2, elinewidth=0.5)\nplt.plot(comp_data.T[0], comp_data.T[1], label='factorization')\nplt.ylabel(\"$Ed^3\\sigma/dp^3(mb\\cdot GeV^{-2}c^3)$\")\nplt.xlabel(\"$p_T(GeV/c)$\")\nplt.legend()\n#plt.xticks(np.arange(4.0, 20, 2))\nplt.title(\"pp cross section of midrapidity $\\Pi^0$ production at $\\sqrt{s}=200GeV$\")\nplt.savefig(\"cs8.pdf\")\nplt.show()\n", "sub_path": "Pion_Cross_Section/Pion_Cross_Section_Pythia/Result/plot_pythia.py", "file_name": "plot_pythia.py", "file_ext": "py", "file_size_in_byte": 908, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.loadtxt", "line_number": 3, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 10, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "272491499", "text": "from django.test import TestCase\nfrom post.models import Post, Categoria\nfrom cuenta.models import Perfil\n\nclass TestModels(TestCase):\n\n def setUp(self):\n self.cat1 = Categoria.objects.create(\n titulo=\"cat1\",\n descripcion=\"desc1\"\n )\n self.perfil1 = Perfil.objects.create(\n username=\"axel\"\n )\n self.post1 = Post.objects.create(\n titulo=\"post1\",\n contenido=\"post1 contenido\",\n permitir_comentarios = True,\n categoria = self.cat1,\n usuario = self.perfil1\n )\n\n def test_post_estado_defecto(self):\n self.assertEquals(self.post1.estado, Post.ESTADO_PUBLICO)\n \n def test_titulo_mas_categoria(self):\n self.assertEquals(self.post1.titulo_mas_categoria(), \"post1-cat1\")", "sub_path": "post/test/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 815, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.test.TestCase", "line_number": 5, "usage_type": "name"}, {"api_name": "post.models.Categoria.objects.create", "line_number": 8, "usage_type": "call"}, {"api_name": "post.models.Categoria.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "post.models.Categoria", "line_number": 8, "usage_type": "name"}, {"api_name": "cuenta.models.Perfil.objects.create", "line_number": 12, "usage_type": "call"}, {"api_name": "cuenta.models.Perfil.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cuenta.models.Perfil", "line_number": 12, "usage_type": "name"}, {"api_name": "post.models.Post.objects.create", "line_number": 15, "usage_type": "call"}, {"api_name": "post.models.Post.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "post.models.Post", "line_number": 15, "usage_type": "name"}, {"api_name": "post.models.Post.ESTADO_PUBLICO", "line_number": 24, "usage_type": "attribute"}, {"api_name": "post.models.Post", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "208600184", "text": "# ------------------------------------------------------------------------\n# coding=utf-8\n# ------------------------------------------------------------------------\n\nfrom django.contrib import admin\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom feincms.admin.item_editor import FeinCMSInline\nfrom feincms.module.medialibrary.fields import ContentWithMediaFile\n\n\nclass MediaFileContentInline(FeinCMSInline):\n raw_id_fields = ('mediafile',)\n radio_fields = {'type': admin.VERTICAL}\n\n\nclass 
MediaFileContent(ContentWithMediaFile):\n \"\"\"\n Rehashed, backwards-incompatible media file content which does not contain\n the problems from v1 anymore.\n\n Create a media file content as follows::\n\n from feincms.content.medialibrary.v2 import MediaFileContent\n Page.create_content_type(MediaFileContent, TYPE_CHOICES=(\n ('default', _('Default')),\n ('lightbox', _('Lightbox')),\n ('whatever', _('Whatever')),\n ))\n\n For a media file of type 'image' and type 'lightbox', the following\n templates are tried in order:\n\n * content/mediafile/image_lightbox.html\n * content/mediafile/image.html\n * content/mediafile/lightbox.html\n * content/mediafile/default.html\n\n The context contains ``content`` and ``request`` (if available).\n \"\"\"\n\n feincms_item_editor_inline = MediaFileContentInline\n\n class Meta:\n abstract = True\n verbose_name = _('media file')\n verbose_name_plural = _('media files')\n\n @classmethod\n def initialize_type(cls, TYPE_CHOICES=None, POSITION_CHOICES=None, SIZE_CHOICES=None, DISPLAY_CHOICES=None, FORMAT_CHOICES=None, CROP_CHOICES=None):\n if TYPE_CHOICES is None:\n raise ImproperlyConfigured('You have to set TYPE_CHOICES when'\n ' creating a %s' % cls.__name__)\n\n cls.add_to_class('type', models.CharField(_('type'),\n max_length=20, choices=TYPE_CHOICES, default=TYPE_CHOICES[0][0]))\n\n if POSITION_CHOICES:\n cls.add_to_class('position', models.CharField(_('position'),\n max_length=10,\n choices=POSITION_CHOICES,\n default=POSITION_CHOICES[0][0]\n ))#.contribute_to_class(cls, 'position')\n\n if SIZE_CHOICES:\n cls.add_to_class('size', models.CharField(_('size'),\n max_length=64,\n choices=SIZE_CHOICES,\n default=SIZE_CHOICES[0][0]\n ))\n\n if DISPLAY_CHOICES:\n cls.add_to_class('display', models.CharField(_('display'),\n max_length=16,\n choices=DISPLAY_CHOICES,\n default=DISPLAY_CHOICES[0][0]\n ))\n\n if CROP_CHOICES:\n cls.add_to_class('crop', models.CharField(_('crop'),\n max_length=10,\n choices=CROP_CHOICES,\n default=CROP_CHOICES[0][0]\n ))\n\n if FORMAT_CHOICES:\n cls.add_to_class('format', models.CharField(_('format'),\n max_length=64,\n choices=FORMAT_CHOICES,\n default=FORMAT_CHOICES[0][0]\n ))#.contribute_to_class(cls, 'format')\n\n\n def render(self, **kwargs):\n ctx = {'content': self}\n ctx.update(kwargs)\n return render_to_string([\n 'content/mediafile/%s_%s.html' % (self.mediafile.type, self.type),\n 'content/mediafile/%s.html' % self.mediafile.type,\n 'content/mediafile/%s.html' % self.type,\n 'content/mediafile/default.html',\n ], ctx, context_instance=kwargs.get('context'))\n", "sub_path": "feincms/content/sorlmedialibrary/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 3775, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "feincms.admin.item_editor.FeinCMSInline", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.admin.VERTICAL", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 17, "usage_type": "name"}, {"api_name": "feincms.module.medialibrary.fields.ContentWithMediaFile", "line_number": 20, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 49, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 50, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 
58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 76, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 83, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 83, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 90, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 90, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "489961547", "text": "# Copyright 2020 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport logging\nimport warnings\n\nimport apache_beam as beam\n\nfrom klio_audio.transforms import io as aio\nfrom klio_audio.transforms import audio\n\nimport transforms\n\n\nwarnings.simplefilter(\"ignore\")\nloggers_to_mute = (\n \"apache_beam.io.filebasedsink\",\n \"apache_beam.runners.worker.statecache\",\n \"apache_beam.runners.portability.fn_api_runner\",\n \"apache_beam.runners.portability.fn_api_runner_transforms\",\n \"apache_beam.internal.gcp.auth\",\n \"oauth2client.transport\",\n \"oauth2client.client\",\n \"klio.metrics\",\n # The concurrency logs may be different for every machine, so let's\n # just turn them off\n \"klio.concurrency\",\n)\nfor logger in loggers_to_mute:\n logging.getLogger(logger).setLevel(logging.ERROR)\nlogging.getLogger(\"klio\").setLevel(logging.DEBUG)\n\n\ndef run(in_pcol, job_config):\n # load 5 seconds of audio and get STFT\n stft = (\n in_pcol\n | aio.GcsLoadBinary()\n | audio.LoadAudio(offset=10, duration=5)\n | audio.GetSTFT()\n )\n\n # get magnitude of audio\n magnitude = (\n stft | \"Get magnitude\" >> beam.ParDo(transforms.GetMagnitude()).with_outputs()\n )\n\n # map the result to a key (the KlioMessage element)\n # so we can group all results by key\n magnitude_key = (\n magnitude.spectrogram\n | \"element to spec\" >> beam.Map(transforms.create_key_from_element)\n )\n # get 
nearest neighbors and map the result to a key (the KlioMessage element)\n    nn_filter = (\n        magnitude.spectrogram\n        | \"Get nn filter\" >> beam.ParDo(transforms.FilterNearestNeighbors())\n        | \"element to filter\" >> beam.Map(transforms.create_key_from_element)\n    )\n\n    # map together the full magnitude with its filter by key (the KlioMessage element)\n    merge = (\n        {\"full\": magnitude_key, \"nnfilter\": nn_filter}\n        | \"merge\" >> beam.CoGroupByKey()\n    )\n\n    # calc the difference between full magnitude and the filter\n    net = merge | beam.Map(transforms.subtract_filter_from_full)\n\n    # create a mask from the filter minus the difference of full & filter\n    first_mask = (\n        {\"first\": nn_filter, \"second\": net, \"full\": magnitude_key}\n        | \"first mask group\" >> beam.CoGroupByKey()\n        | \"first mask\" >> beam.ParDo(transforms.GetSoftMask(margin=2))\n    )\n    # create another mask from the difference of full & filter minus the filter\n    second_mask = (\n        {\"first\": net, \"second\": nn_filter, \"full\": magnitude_key}\n        | \"second mask group\" >> beam.CoGroupByKey()\n        | \"second mask\" >> beam.ParDo(transforms.GetSoftMask(margin=10))\n    )\n\n    # plot the full magnitude spectrogram\n    magnitude_out = (\n        magnitude.spectrogram\n        | \"full spec\" >> audio.GetSpec()\n        | \"plot full spec\" >> audio.SpecToPlot(title=\"Full Spectrogram for {element}\", y_axis=\"log\")\n        | \"save full\" >> aio.GcsUploadPlot(suffix=\"-full\")\n    )\n    # plot the first mask (background) spectrogram\n    background_out = (\n        first_mask\n        | \"background spec\" >> audio.GetSpec()\n        | \"plot background spec\" >> audio.SpecToPlot(title=\"Background Spectrogram for {element}\", y_axis=\"log\")\n        | \"save background\" >> aio.GcsUploadPlot(suffix=\"-background\")\n    )\n    # plot the second mask (foreground) spectrogram\n    foreground_out = (\n        second_mask\n        | \"foreground spec\" >> audio.GetSpec()\n        | \"plot foreground spec\" >> audio.SpecToPlot(title=\"Foreground Spectrogram for {element}\", y_axis=\"log\")\n        | \"save foreground\" >> aio.GcsUploadPlot(suffix=\"-foreground\")\n    )\n\n    return (\n        (magnitude_out, background_out, foreground_out)\n        | \"flatten output paths\" >> beam.Flatten()\n        | \"remove dups\" >> beam.Distinct()\n    )\n", "sub_path": "integration/audio-spectrograms/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 4296, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "warnings.simplefilter", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 42, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 43, "usage_type": "attribute"}, {"api_name": "klio_audio.transforms.io.GcsLoadBinary", "line_number": 50, "usage_type": "call"}, {"api_name": "klio_audio.transforms.io", "line_number": 50, "usage_type": "name"}, {"api_name": "klio_audio.transforms.audio.LoadAudio", "line_number": 51, "usage_type": "call"}, {"api_name": "klio_audio.transforms.audio", "line_number": 51, "usage_type": "name"}, {"api_name": "klio_audio.transforms.audio.GetSTFT", "line_number": 52, "usage_type": "call"}, {"api_name": "klio_audio.transforms.audio", "line_number": 52, "usage_type": "name"}, {"api_name": "apache_beam.ParDo", "line_number": 57, "usage_type": "call"}, {"api_name": "transforms.GetMagnitude", "line_number": 57, "usage_type": "call"}, {"api_name": 
"apache_beam.Map", "line_number": 64, "usage_type": "call"}, {"api_name": "transforms.create_key_from_element", "line_number": 64, "usage_type": "attribute"}, {"api_name": "apache_beam.ParDo", "line_number": 69, "usage_type": "call"}, {"api_name": "transforms.FilterNearestNeighbors", "line_number": 69, "usage_type": "call"}, {"api_name": "apache_beam.Map", "line_number": 70, "usage_type": "call"}, {"api_name": "transforms.create_key_from_element", "line_number": 70, "usage_type": "attribute"}, {"api_name": "apache_beam.CoGroupByKey", "line_number": 76, "usage_type": "call"}, {"api_name": "apache_beam.Map", "line_number": 80, "usage_type": "call"}, {"api_name": "transforms.subtract_filter_from_full", "line_number": 80, "usage_type": "attribute"}, {"api_name": "apache_beam.CoGroupByKey", "line_number": 85, "usage_type": "call"}, {"api_name": "apache_beam.ParDo", "line_number": 86, "usage_type": "call"}, {"api_name": "transforms.GetSoftMask", "line_number": 86, "usage_type": "call"}, {"api_name": "apache_beam.CoGroupByKey", "line_number": 91, "usage_type": "call"}, {"api_name": "apache_beam.ParDo", "line_number": 92, "usage_type": "call"}, {"api_name": "transforms.GetSoftMask", "line_number": 92, "usage_type": "call"}, {"api_name": "klio_audio.transforms.audio.GetSpec", "line_number": 98, "usage_type": "call"}, {"api_name": "klio_audio.transforms.audio", "line_number": 98, "usage_type": "name"}, {"api_name": "klio_audio.transforms.audio.SpecToPlot", "line_number": 99, "usage_type": "call"}, {"api_name": "klio_audio.transforms.audio", "line_number": 99, "usage_type": "name"}, {"api_name": "klio_audio.transforms.io.GcsUploadPlot", "line_number": 100, "usage_type": "call"}, {"api_name": "klio_audio.transforms.io", "line_number": 100, "usage_type": "name"}, {"api_name": "klio_audio.transforms.audio.GetSpec", "line_number": 105, "usage_type": "call"}, {"api_name": "klio_audio.transforms.audio", "line_number": 105, "usage_type": "name"}, {"api_name": "klio_audio.transforms.audio.SpecToPlot", "line_number": 106, "usage_type": "call"}, {"api_name": "klio_audio.transforms.audio", "line_number": 106, "usage_type": "name"}, {"api_name": "klio_audio.transforms.io.GcsUploadPlot", "line_number": 107, "usage_type": "call"}, {"api_name": "klio_audio.transforms.io", "line_number": 107, "usage_type": "name"}, {"api_name": "klio_audio.transforms.audio.GetSpec", "line_number": 112, "usage_type": "call"}, {"api_name": "klio_audio.transforms.audio", "line_number": 112, "usage_type": "name"}, {"api_name": "klio_audio.transforms.audio.SpecToPlot", "line_number": 113, "usage_type": "call"}, {"api_name": "klio_audio.transforms.audio", "line_number": 113, "usage_type": "name"}, {"api_name": "klio_audio.transforms.io.GcsUploadPlot", "line_number": 114, "usage_type": "call"}, {"api_name": "klio_audio.transforms.io", "line_number": 114, "usage_type": "name"}, {"api_name": "apache_beam.Flatten", "line_number": 119, "usage_type": "call"}, {"api_name": "apache_beam.Distinct", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "271213053", "text": "import pygame\nfrom conf.setting import Setting\nfrom classes.ship_changeSpeed import Ship\nfrom script import game_function as gf\n\nsetting = Setting()\n\ndef run_game():\n pygame.init()\n screen = pygame.display.set_mode((setting.width,setting.height))\n pygame.display.set_caption(\"Alien Invasion\")\n # 创建飞船\n ship = Ship(screen , setting)\n while True:\n # 点击按键之后不动使飞机能够一直移动\n gf.check_events_keepMove(ship)\n ship.update()\n gf.update_screen(setting , screen , 
ship)\n\nrun_game()", "sub_path": "script/ship/alien_invasion004_changeSpeed.py", "file_name": "alien_invasion004_changeSpeed.py", "file_ext": "py", "file_size_in_byte": 556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "conf.setting.Setting", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 11, "usage_type": "attribute"}, {"api_name": "classes.ship_changeSpeed.Ship", "line_number": 13, "usage_type": "call"}, {"api_name": "script.game_function.check_events_keepMove", "line_number": 16, "usage_type": "call"}, {"api_name": "script.game_function", "line_number": 16, "usage_type": "name"}, {"api_name": "script.game_function.update_screen", "line_number": 18, "usage_type": "call"}, {"api_name": "script.game_function", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "298370620", "text": "# CRC pipeline helpers\nimport pandas as pd\nimport pdb\nfrom PyBiodesy.Fitting import BindingCurve, logspace\nfrom PyBiodesy.integrals import (\n fp,\n deg2rad,\n rad2deg,\n p_SHG_fun,\n s_SHG_fun,\n p_TPF_fun,\n s_TPF_fun,\n E,\n)\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom scipy.stats import linregress\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ncolumn_names_replace = dict(\n zip(\n [\"Plate.row\", \"Plate.col\", \"Molecule.Name\", \"Concentration [uM]\"],\n [\"Row\", \"Column\", \"Source Substance\", \"Source Concentration\"],\n )\n)\n\n\ndef label_rows(df):\n choose_cols = [\"Source Row\", \"Source Column\"]\n well_labels = [\n \"{:s}{:d}\".format(*[row[h] for h in choose_cols])\n for index, row in df[choose_cols].iterrows()\n ]\n return df.set_index(pd.Series(well_labels))\n\n\ndef df_list_to_array(df, repeat_x=True):\n \"\"\" Converts a MultiIndex DataFrame (pivot table) to arrays\n\n The data entries in the input pivot table is meant to be of the type\n 'list', so they are not aggregated / reduced. This is meant to be used\n for getting individual entries that are replicates of the same grouped\n category (e.g. original data set was reduced by passing 'list' to the\n `aggfunc` argument of `df.pivot_table`)\n\n The input DataFrame must have the following arrangements:\n\n ____________COLUMNS___________\n ________________________________________| ch#1 | ch#2 | ch#3 | ch#4 |\n Source Substance | Source Concentration | | | | |\n -----------------|----------------------|----------------------------|\n dmso | 0.00 | [..] | [..] | [..] | [..] |\n | | [..] | [..] | [..] | [..] |\n | | [..] | [..] | [..] | [..] |\n | | [..] | [..] | [..] | [..] |\n cmpd1 | 0.13 | [..] | [..] | [..] | [..] |\n | 0.26 | [..] | [..] | [..] | [..] |\n | 0.52 | [..] | [..] | [..] | [..] |\n | 1.04 | [..] | [..] | [..] | [..] |\n\n The returned output is a dictionary with keys derived from items in the\n first column, (e.g. 
\"Source Substance\"), followed by an array of\n \"Source Concentration\"\n\n \"\"\"\n\n # number of rows/index\n Nindex = len(df.index.levels)\n\n if Nindex != 2:\n raise ValueError(\"There must be 2 exactly 2 row levels.\")\n\n ret_dict = {}\n\n index_names = df.index.names\n\n # go through each 'Compound' at level 0\n for level_name in df.index.get_level_values(0).unique():\n wrk_dict = {}\n for channel_name in df.columns.tolist():\n # get one column, 'Channel', from the sliced DataFrame\n subdf = df.xs(level_name, level=0, axis=0)[channel_name]\n # the remaining level\n x = subdf.index.to_numpy()\n y = np.array([x for x in subdf])\n # column of the array is the # of replicates\n if repeat_x:\n x = np.repeat(x[:, np.newaxis], y.shape[1], axis=1)\n # assign to dictionary\n wrk_dict[index_names[1]] = x\n wrk_dict[channel_name] = y\n # back to 'Compound' loop\n ret_dict[level_name] = wrk_dict\n\n return ret_dict\n\n\ndef fit_data_set_v2(df, output_pdf):\n # input dataframe must not have test compounds, otherwise\n # the CRC fitting would fail because the concentrations are constant\n data_headers = [\n \"%ΔP-SHG\",\n \"%ΔS-SHG\",\n \"P-FLcorr\",\n \"S-FLcorr\",\n \"SHGratio\",\n \"TPFratio\",\n \"Angle\",\n \"distribution\",\n ]\n\n header_locs = [\n (0, 0),\n (0, 1),\n (1, 0),\n (1, 1),\n (2, 0),\n (2, 1),\n (3, 0),\n (3, 1),\n ]\n\n ax_coords = list(zip(data_headers, header_locs))\n\n txtboxfmt = r\"$K_D = {:6.2f} \\pm {:6.2f} \\mu M, ({:5.1f}\\%)$\"\n boxprop = {\"boxstyle\": \"round\", \"facecolor\": \"wheat\", \"alpha\": 0.25}\n rep_color = (\"#286ede\", \"#0fa381\")\n\n with PdfPages(output_pdf) as mpdf:\n\n for compound, subdf in df.groupby([\"Source Substance\"]):\n print(\"\\rWorking on compound ... {:s}\".format(compound), end=\"\")\n source_plates = subdf[\"Source Plate ID\"].unique().tolist()\n # set1\n subset1 = subdf[subdf[\"Source Plate ID\"] == source_plates[0]].copy()\n subset2 = subdf[subdf[\"Source Plate ID\"] == source_plates[1]].copy()\n subset1.sort_values(\"Source Concentration\", inplace=True)\n subset2.sort_values(\"Source Concentration\", inplace=True)\n fig, ax = plt.subplots(nrows=4, ncols=2, figsize=(8.5, 11))\n fig.suptitle(compound)\n\n for channel, (i, j) in ax_coords:\n x1 = subset1[\"Source Concentration\"].values\n x2 = subset2[\"Source Concentration\"].values\n y1 = subset1[channel].values\n y2 = subset2[channel].values\n\n legend_item1, = ax[i, j].plot(\n x1,\n y1,\n \".\",\n c=rep_color[0],\n label=\"{:s}\".format(source_plates[0]),\n )\n legend_item2, = ax[i, j].plot(\n x1,\n y2,\n \".\",\n c=rep_color[1],\n label=\"{:s}\".format(source_plates[1]),\n )\n\n ax[i, j].plot(x1, (y1 + y2) / 2.0, \"o\", c=\"#fa880f\")\n\n # now do fitting\n x_rep = np.array([x1, x2])\n y_rep = np.array([y1, y2])\n\n if channel in [\"%ΔP-SHG\", \"%ΔS-SHG\"]:\n # for SHG signals\n binding_model = BindingCurve.Hyperbolic(\n x_rep.ravel(), y_rep.ravel(), modality=\"SHG\"\n )\n try:\n binding_model.fit()\n except ValueError:\n binding_model = None\n else:\n # for anything else\n binding_model = BindingCurve.Hyperbolic(\n x_rep.ravel(), y_rep.ravel(), modality=\"TPF\"\n )\n try:\n binding_model.fit()\n except ValueError:\n binding_model = None\n\n # fitting done, overlay the result\n if binding_model is not None and binding_model.optres.success:\n xsmooth = logspace(x1.min(), x1.max())\n ax[i, j].plot(\n xsmooth, binding_model.model(xsmooth), \"k-\", lw=2\n )\n\n Kd_value = binding_model.optres.params[\"Kd\"].value\n Kd_stderr = binding_model.optres.params[\"Kd\"].stderr\n\n if 
Kd_value is None or Kd_stderr is None:\n txtstr = None\n else:\n Kd_error = 100.0 * (Kd_stderr / Kd_value)\n txtstr = txtboxfmt.format(Kd_value, Kd_stderr, Kd_error)\n else:\n txtstr = None\n\n if txtstr is not None:\n ax[i, j].text(\n 0.05,\n 0.8,\n txtstr,\n transform=ax[i, j].transAxes,\n fontsize=9,\n bbox=boxprop,\n )\n\n ax[i, j].set_xscale(\"log\")\n ax[i, j].set_ylabel(\"{:s}\".format(channel))\n\n fig.legend(\n [legend_item1, legend_item2],\n labels=[legend_item1.get_label(), legend_item2.get_label()],\n fancybox=True,\n shadow=True,\n bbox_to_anchor=(0.7, 0.88, 0.15, 0.1),\n )\n\n mpdf.savefig()\n plt.close()\n\n\ndef fit_data_set(data, output_pdf, key_pair):\n \"\"\" Curve fitting pipeline for June 2019 hit follow-up \"\"\"\n compound_list = list(data.keys())\n\n # ctrl_compounds = [c for c in compound_list if c.startswith('ctrl_')]\n test_compounds = [c for c in compound_list if not c.startswith(\"ctrl_\")]\n\n duplicates = key_pair\n data_headers = [\n \"%ΔP-SHG\",\n \"%ΔS-SHG\",\n \"P-FLcorr\",\n \"S-FLcorr\",\n \"SHGratio\",\n \"TPFratio\",\n \"Angle\",\n \"distribution\",\n ]\n header_locs = [\n (0, 0),\n (0, 1),\n (1, 0),\n (1, 1),\n (2, 0),\n (2, 1),\n (3, 0),\n (3, 1),\n ]\n ax_coords = list(zip(data_headers, header_locs))\n\n # data is now grouped for fitting\n txtboxfmt = r\"$K_D = {:.2f} \\pm {:.2f} \\mu M, ({:.1f}\\%)$\"\n boxprop = {\"boxstyle\": \"round\", \"facecolor\": \"wheat\", \"alpha\": 0.25}\n rep_color = (\"#286ede\", \"#0fa381\")\n compound_Kd_dict = {}\n\n with PdfPages(output_pdf) as mpdf:\n for compound in test_compounds:\n print(\"Processing ... {:s}\".format(compound))\n Kd_dict = {}\n fig, ax = plt.subplots(nrows=4, ncols=2, figsize=(8.5, 11))\n fig.suptitle(compound)\n sub_data = data[compound]\n for channel, (i, j) in ax_coords:\n x_combined = []\n y_combined = []\n legend_items = []\n\n for plate_ID in duplicates:\n x_data = sub_data[plate_ID][\"Source Concentration\"]\n y_data = sub_data[plate_ID][channel]\n x_combined.append(x_data)\n y_combined.append(y_data)\n legend_item, = ax[i, j].plot(\n x_data,\n y_data,\n \".\",\n c=rep_color[plate_ID == duplicates[0]],\n label=\"{:s}\".format(plate_ID),\n )\n legend_items.append(legend_item)\n\n # do fitting\n x_arr = np.array(x_combined)\n y_arr = np.array(y_combined)\n x_mean = x_combined[0]\n y_mean = (y_combined[0] + y_combined[1]) / 2.0\n ax[i, j].plot(x_mean, y_mean, \"o\", c=\"#fa880f\")\n\n if channel in [\"%ΔP-SHG\", \"%ΔS-SHG\"]:\n binding_model = BindingCurve.Hyperbolic(\n x_arr.ravel(), y_arr.ravel(), modality=\"SHG\"\n )\n try:\n binding_model.fit()\n except ValueError:\n binding_model = None\n else:\n\n binding_model = BindingCurve.Hyperbolic(\n x_arr.ravel(), y_arr.ravel(), modality=\"TPF\"\n )\n try:\n binding_model.fit()\n except ValueError:\n binding_model = None\n\n if binding_model is not None and binding_model.optres.success:\n # do filtering here for fitting quality\n xsmooth = logspace(x_arr.min(), x_arr.max())\n ax[i, j].plot(\n xsmooth, binding_model.model(xsmooth), \"k-\", lw=2\n )\n Kd_value = binding_model.optres.params[\"Kd\"].value\n Kd_stderr = binding_model.optres.params[\"Kd\"].stderr\n Kd_error = 100.0 * (Kd_stderr / Kd_value)\n Kd_CV = Kd_stderr / Kd_value\n txtstr = txtboxfmt.format(Kd_value, Kd_stderr, Kd_error)\n Kd_dict[channel] = [Kd_value, Kd_stderr]\n else:\n txtstr = None\n Kd_dict[channel] = [np.nan, np.nan]\n\n if txtstr is not None:\n ax[i, j].text(\n 0.05,\n 0.8,\n txtstr,\n transform=ax[i, j].transAxes,\n fontsize=9,\n bbox=boxprop,\n )\n ax[i, 
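Both fitting pipelines delegate to `BindingCurve.Hyperbolic`, a PyBiodesy class whose internals are not shown here. As a rough stand-in, a one-site hyperbola and the Kd standard error it reports can be reproduced with `scipy.optimize.curve_fit`; the model form, parameter names, and data below are assumptions, not the library's actual implementation:

```python
# One-site hyperbolic binding isotherm fitted to synthetic data.
import numpy as np
from scipy.optimize import curve_fit

def hyperbolic(x, bmax, kd):
    # fractional response of a single-site binding model
    return bmax * x / (kd + x)

rng = np.random.default_rng(0)
conc = np.logspace(-2, 1.4, 10)                      # ~0.01 to 25 uM
resp = hyperbolic(conc, 40.0, 1.5) + rng.normal(0, 1.0, conc.size)

popt, pcov = curve_fit(hyperbolic, conc, resp, p0=(resp.max(), 1.0))
kd, kd_err = popt[1], np.sqrt(pcov[1, 1])
print(f"Kd = {kd:.2f} +/- {kd_err:.2f} uM ({100 * kd_err / kd:.1f}%)")
```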
j].set_xlim([1e-2, 25.0])\n ax[i, j].set_xscale(\"log\")\n ax[i, j].set_ylabel(\"{:s}\".format(channel))\n\n fig.legend(\n legend_items[0:2],\n labels=[i.get_label() for i in legend_items[0:2]],\n fancybox=True,\n shadow=True,\n bbox_to_anchor=(0.7, 0.9, 0.15, 0.1),\n )\n\n mpdf.savefig()\n plt.close()\n\n compound_Kd_dict[compound] = Kd_dict\n\n return compound_Kd_dict\n\n\ndef eval_processed(\n df,\n lb_fun,\n ub_fun,\n pSHG_unlabeled=6550.0,\n sSHG_unlabeled=770.0,\n inplace=True,\n):\n \"\"\" evaluates processed DataFrames for failure modes\n\n Two failure modes are considered: SHG background correction and angular\n solution.\n\n Args:\n lb_fun(callable): callable with signature lb_fun(x), where x is the\n TPF ratio. This should return the lower-bound for SHG ratio.\n ub_fun(callable): callable with signature ub_fun(x), where x is the\n TPF ratio. This should return the upper-bound for SHG ratio.\n pSHG_unlabeled(float): this is the value for unlabeled protein attached\n to the surface in the P-SHG channel.\n sSHG_unlabeled(float): this is the value for unlabeled protein attached\n to the surface in the S-SHG channel.\n\n \"\"\"\n shg_bg_fails = []\n solvable = []\n series_id = []\n\n for i, item in df[[\"P-SHG\", \"S-SHG\", \"SHGratio\", \"TPFratio\"]].iterrows():\n if item[\"P-SHG\"] <= pSHG_unlabeled or item[\"S-SHG\"] <= sSHG_unlabeled:\n shg_bg_fails.append(True)\n # if background can't be corrected, we also can't get angles\n # because we have no SHG ratio\n solvable.append(False)\n else:\n # passed background subtraction check, now check for solutions\n shg_bg_fails.append(False)\n\n x, y = item[\"TPFratio\"], item[\"SHGratio\"]\n if lb_fun(x) < y < ub_fun(x):\n solvable.append(True)\n else:\n solvable.append(False)\n\n series_id.append(i)\n\n if inplace:\n df[\"SHG_BG_fail\"] = pd.Series(shg_bg_fails, index=series_id)\n df[\"Angular_solution\"] = pd.Series(solvable, index=series_id)\n return None\n else:\n copied_df = df.copy()\n copied_df[\"SHG_BG_fail\"] = pd.Series(shg_bg_fails, index=series_id)\n copied_df[\"Angular_solution\"] = pd.Series(solvable, index=series_id)\n return copied_df\n\n\ndef simulate_data(angle, distribution):\n \"\"\" compute 4-channel quantities for a given angle and distribution\n\n Args:\n angle(float): mean tilt angle of the probe.\n distribution(float): distribution width or standard deviation.\n\n Returns:\n **2, **2, , \n \"\"\"\n θ = np.linspace(0.0, np.pi, num=500)\n I_ppp = E(θ, deg2rad(angle), deg2rad(distribution), p_SHG_fun) ** 2\n I_pss = E(θ, deg2rad(angle), deg2rad(distribution), s_SHG_fun) ** 2\n F_pp = E(θ, deg2rad(angle), deg2rad(distribution), p_TPF_fun)\n F_ss = E(θ, deg2rad(angle), deg2rad(distribution), s_TPF_fun)\n return I_ppp, I_pss, F_pp, F_ss\n\n\ndef screen_qc(grp, neg_ctrl_pos, pos_ctrl_pos):\n negative_ctrl = grp.loc[\n grp[\"Well coordinates\"].isin(neg_ctrl_pos), \"%SHG change\"\n ]\n positive_ctrl = grp.loc[\n grp[\"Well coordinates\"].isin(pos_ctrl_pos), \"%SHG change\"\n ]\n\n zprime = 1 - (\n 3\n * (positive_ctrl.std() + negative_ctrl.std())\n / (positive_ctrl.mean() - negative_ctrl.mean())\n )\n\n zrobust = 1 - (\n 3\n * (positive_ctrl.mad() - negative_ctrl.mad())\n / (positive_ctrl.median() - negative_ctrl.median())\n )\n\n retdict = {\n \"Z-prime\": zprime,\n \"robust Z-prime\": zrobust,\n }\n\n return pd.Series(retdict)\n\n\ndef compound_summarizer(grp):\n \"\"\" Binding-curve based aggregator and calculate all quality metrics\n\n This function is mean to be used as part of a chained operation from\n a dataframe. 
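`screen_qc` computes the Z' factor and a robust variant from the control wells. For reference, the textbook Z' (Zhang et al., 1999) sums the two spreads in the numerator in both variants; note that the robust branch in the record subtracts the MADs, which reads like a sign typo. A standalone sketch with invented control values (here MAD means median absolute deviation; pandas' `.mad()` used above is the mean absolute deviation):

```python
# Z' factor and a robust median/MAD variant on made-up controls.
import numpy as np

pos = np.array([92.0, 88.5, 90.3, 91.1, 89.7])   # positive controls
neg = np.array([4.1, 5.3, 3.8, 4.9, 4.4])        # negative controls

zprime = 1 - 3 * (pos.std(ddof=1) + neg.std(ddof=1)) / abs(pos.mean() - neg.mean())

def mad(v):
    # median absolute deviation about the median
    return np.median(np.abs(v - np.median(v)))

z_robust = 1 - 3 * (mad(pos) + mad(neg)) / abs(np.median(pos) - np.median(neg))
print(f"Z' = {zprime:.2f}, robust Z' = {z_robust:.2f}")
```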
Otherwise, it takes a dataframe with all of the 'primary' and\n 'secondary' quantities (observation, corrected observation, or derived\n quantities like angles and distribution)\n\n Example use::\n\n data = pd.read_excel('./Analysis/All_compounds_summary.xlsx')\n # summarize non control datasets\n\n # dont analyze control compounds\n test_set = [cmpd for cmpd in data['Source Substance'].unique().tolist()\n if not cmpd.startswith('ctrl_')]\n test_df = data[data['Source Substance'].isin(test_set)]\n\n # here test_df is a DataFrame containing columns.\n # The sub-indices are retained in the output, keeping the 'Source Plate\n # ID' separate in the output.\n summarized_df = (\n test_df.set_index(['Source Substance'])\n .groupby(by=['Source Substance'], axis=0)\n .apply(compound_summarizer)\n )\n\n \"\"\"\n\n x = grp[\"Source Concentration\"]\n y1 = grp[\"%ΔP-SHG\"]\n y2 = grp[\"%ΔS-SHG\"]\n y3 = grp[\"P-FLcorr\"]\n y4 = grp[\"S-FLcorr\"]\n\n N_proc_fail = grp[\"SHG_BG_fail\"].sum()\n N_solved = grp[\"Angular_solution\"].sum()\n Ndata = grp[\"SHG_BG_fail\"].count()\n\n # data fitting bit\n m1 = BindingCurve.Hyperbolic(x.values, y1.values, modality=\"SHG\")\n m1.fit()\n\n m2 = BindingCurve.Hyperbolic(x.values, y2.values, modality=\"SHG\")\n m2.fit()\n\n m3 = BindingCurve.Hyperbolic(x.values, y3.values, modality=\"TPF\")\n m3.fit()\n\n m4 = BindingCurve.Hyperbolic(x.values, y4.values, modality=\"TPF\")\n m4.fit()\n\n def _CV_score(x, y):\n cv = x / y if x is not None else 1e8\n return cv\n\n Kd1, Kd1_CV = (\n m1.optres.params[\"Kd\"].value,\n _CV_score(m1.optres.params[\"Kd\"].stderr, m1.optres.params[\"Kd\"].value),\n )\n\n Kd2, Kd2_CV = (\n m2.optres.params[\"Kd\"].value,\n _CV_score(m2.optres.params[\"Kd\"].stderr, m2.optres.params[\"Kd\"].value),\n )\n\n Kd3, Kd3_CV = (\n m3.optres.params[\"Kd\"].value,\n _CV_score(m3.optres.params[\"Kd\"].stderr, m3.optres.params[\"Kd\"].value),\n )\n\n Kd4, Kd4_CV = (\n m4.optres.params[\"Kd\"].value,\n _CV_score(m4.optres.params[\"Kd\"].stderr, m4.optres.params[\"Kd\"].value),\n )\n\n # Physicality score bit\n vec_angles = grp[\"Angle\"]\n vec_dists = grp[\"distribution\"]\n\n # also fit a curve to the angle and distribution\n m5 = BindingCurve.Hyperbolic(x.values, vec_angles.values, modality=\"TPF\")\n m5.fit()\n m6 = BindingCurve.Hyperbolic(x.values, vec_dists.values, modality=\"TPF\")\n m6.fit()\n\n Kd5, Kd5_CV = (\n m5.optres.params[\"Kd\"].value,\n _CV_score(m5.optres.params[\"Kd\"].stderr, m5.optres.params[\"Kd\"].value),\n )\n\n Kd6, Kd6_CV = (\n m6.optres.params[\"Kd\"].value,\n _CV_score(m6.optres.params[\"Kd\"].stderr, m6.optres.params[\"Kd\"].value),\n )\n\n calc_Ippp = np.zeros(Ndata)\n calc_Ipss = np.zeros(Ndata)\n calc_Fpp = np.zeros(Ndata)\n calc_Fss = np.zeros(Ndata)\n\n # convert angles to\n # compute intensities from angles and distribution\n # convert to numpy to facilitate numerical indexing\n vec_angles = vec_angles.values\n vec_dists = vec_dists.values\n\n for n in range(Ndata):\n if ~np.isnan(vec_angles[n]) and ~np.isnan(vec_dists[n]):\n calc_Ippp[n], calc_Ipss[n], calc_Fpp[n], calc_Fss[\n n\n ] = simulate_data(vec_angles[n], vec_dists[n])\n else:\n calc_Ippp[n] = calc_Ipss[n] = calc_Fpp[n] = calc_Fss[n] = np.nan\n\n mask1 = ~np.isnan(calc_Ippp) & ~np.isnan(grp[\"P-SHGcorr\"])\n mask2 = ~np.isnan(calc_Ipss) & ~np.isnan(grp[\"S-SHGcorr\"])\n mask3 = ~np.isnan(calc_Fpp) & ~np.isnan(grp[\"P-FLcorr\"])\n mask4 = ~np.isnan(calc_Fss) & ~np.isnan(grp[\"S-FLcorr\"])\n\n m1, b1, rval1, pval1, m_err1 = linregress(\n calc_Ippp[mask1], 
grp[\"P-SHGcorr\"][mask1]\n )\n m2, b2, rval2, pval2, m_err2 = linregress(\n calc_Ipss[mask1], grp[\"S-SHGcorr\"][mask2]\n )\n m3, b3, rval3, pval3, m_err3 = linregress(\n calc_Fpp[mask1], grp[\"P-FLcorr\"][mask3]\n )\n m4, b4, rval4, pval4, m_err4 = linregress(\n calc_Fss[mask1], grp[\"S-FLcorr\"][mask4]\n )\n\n logprob_physical = (\n (1.0 - rval1) ** 2\n + (1.0 - rval2) ** 2\n + (1.0 - rval3) ** 2\n + (1.0 - rval4) ** 2\n )\n\n # all rvalues independent prod(prob(rvals)), most stringent\n prob_physical_v1 = np.exp(-logprob_physical)\n # prob(rval), either one (most relaxed). Uniform averaging for all\n # probabilities\n prob_physical_v2 = (\n np.exp(-(1 - rval1) ** 2)\n + np.exp(-(1 - rval2) ** 2)\n + np.exp(-(1 - rval3) ** 2)\n + np.exp(-(1 - rval4) ** 2)\n ) / 4.0\n\n # prob(SHG) and prob(FL), middle\n prob_physical_v3 = (\n (np.exp(-(1 - rval1) ** 2) + np.exp(-(1 - rval2) ** 2))\n / 2.0\n * (np.exp(-(1 - rval3) ** 2) + np.exp(-(1 - rval4) ** 2))\n / 2.0\n )\n\n retdict = {\n \"Kd (P-SHG)\": Kd1,\n \"Kd (S-SHG)\": Kd2,\n \"Kd (P-FLcorr)\": Kd3,\n \"Kd (S-FLcorr)\": Kd4,\n \"Kd (Angles)\": Kd5,\n \"Kd (distribution)\": Kd6,\n \"CV (P-SHG)\": Kd1_CV,\n \"CV (S-SHG)\": Kd2_CV,\n \"CV (P-FLcorr)\": Kd3_CV,\n \"CV (S-FLcorr)\": Kd4_CV,\n \"CV (Angles)\": Kd5_CV,\n \"CV (distribution)\": Kd6_CV,\n \"R (P-SHG)\": rval1,\n \"R (S-SHG)\": rval2,\n \"R (P-FL)\": rval3,\n \"R (S-FL)\": rval4,\n \"Prob(physical)_v1\": prob_physical_v1,\n \"Prob(physical)_v2\": prob_physical_v2,\n \"Prob(physical)_v3\": prob_physical_v3,\n \"#BG fail\": N_proc_fail,\n \"#Solved\": N_solved,\n \"Count\": Ndata,\n }\n\n return pd.Series(retdict, dtype=object)\n\n\ndef eval_scores(grp):\n \"\"\" an aggregator function to incorporate replicate error into quality scores\n\n Usage ::\n\n # read all of the compile dataset\n data = pd.read_excel(\"./Processed/All_experiments.xlsx\", index_col=0)\n\n # index data by compound and plate ID\n indexed_data = data\\\n .set_index(['Source Substance', 'Source Plate ID'])\n .sort_values(by=[\"Source Substance\",\"Source Plate ID\"])\n\n data_scores = indexed_data\\\n .groupby(['Source Substance'])\\\n .apply(eval_scores).unstack()\n\n \"\"\"\n current_compound = grp.index.get_level_values(0)[0]\n # get 'Source Plate ID'\n plate_id = grp.index.get_level_values(\"Source Plate ID\").unique().tolist()\n\n # split dataset according to 'Source Plate ID'\n rep1 = grp.xs(plate_id[0], level=\"Source Plate ID\", axis=0).set_index(\n \"Source Concentration\"\n )\n rep2 = grp.xs(plate_id[1], level=\"Source Plate ID\", axis=0).set_index(\n \"Source Concentration\"\n )\n\n # dictionary of all channels\n ret_scores = {}\n\n # columns containing data to analyze\n data_columns = [\n \"%ΔP-SHG\",\n \"%ΔS-SHG\",\n \"P-FLcorr\",\n \"S-FLcorr\",\n \"SHGratio\",\n \"TPFratio\",\n \"Angle\",\n \"distribution\",\n ]\n\n angle_fails = grp[\"Angle\"].isna().sum()\n total_counts = grp[\"Angle\"].isna().count()\n percent_fail = angle_fails / total_counts\n\n for column in data_columns:\n # reproducibility score\n rep1vals = rep1[column]\n rep2vals = rep2.loc[rep1.index][column]\n\n mask = ~np.isnan(rep1vals) & ~np.isnan(rep2vals)\n m, b, rval, pval, merr = linregress(rep1vals[mask], rep2vals[mask])\n\n if np.isnan(rval):\n print(\n (\n \"Couldn't fit line between replicates \"\n \"in column {:s} of {:s}!\"\n ).format(column, current_compound)\n )\n # Fit a CRC to current column\n # this concatenates along row so 2xN\n x = np.array(\n [\n rep1.index.get_level_values(\"Source Concentration\").to_numpy(),\n 
rep2.index.get_level_values(\"Source Concentration\").to_numpy(),\n ]\n )\n y = np.array([rep1[column], rep2[column]])\n\n if column == \"SHGratio\":\n m_modality = \"TPF\"\n else:\n m_modality = \"SHG\" if \"SHG\" in column else \"TPF\"\n\n model = BindingCurve.Hyperbolic(\n x.ravel(), y.ravel(), modality=m_modality\n )\n\n model.fit()\n\n if model.optres.errorbars:\n Kd_val = model.optres.params[\"Kd\"].value\n Kd_err = model.optres.params[\"Kd\"].stderr\n Kd_CV = Kd_err / Kd_val\n else:\n Kd_val = 1e6\n Kd_CV = 1e6\n\n # calculate the dynamic range of the data\n data_range = y.max() - y.min()\n\n ret_scores[column] = pd.Series(\n {\n \"dynamic_range\": data_range,\n \"slope\": m,\n \"r-value\": rval,\n \"Kd\": Kd_val,\n \"StdErr(Kd)\": Kd_err,\n \"CV(Kd)\": Kd_CV,\n r\"% solution fail\": percent_fail,\n }\n )\n\n return pd.DataFrame.from_dict(ret_scores, orient=\"columns\")\n\n\ndef plot_physical_scores(df, output_pdf):\n\n txtboxfmt = r\"$R-value: {:.2f}, Error : {:.1f}%$\"\n boxprop = {\"boxstyle\": \"round\", \"facecolor\": \"wheat\", \"alpha\": 0.25}\n\n with PdfPages(output_pdf) as mpdf:\n for compound, subdf in df.groupby([\"Source Substance\"]):\n print(\"\\rWorking on compound ... {:s} \".format(compound), end=\"\")\n\n fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(8.5, 11))\n fig.suptitle(compound)\n\n vec_angles = subdf[\"Angle\"].to_numpy()\n vec_dists = subdf[\"distribution\"].to_numpy()\n\n Ndata = vec_angles.size\n\n calc_Ippp = np.zeros(Ndata)\n calc_Ipss = np.zeros(Ndata)\n calc_Fpp = np.zeros(Ndata)\n calc_Fss = np.zeros(Ndata)\n\n for n in range(Ndata):\n if ~np.isnan(vec_angles[n]) and ~np.isnan(vec_dists[n]):\n calc_Ippp[n], calc_Ipss[n], calc_Fpp[n], calc_Fss[\n n\n ] = simulate_data(vec_angles[n], vec_dists[n])\n else:\n calc_Ippp[n] = calc_Ipss[n] = calc_Fpp[n] = calc_Fss[\n n\n ] = np.nan\n\n mask1 = ~np.isnan(calc_Ippp) & ~np.isnan(subdf[\"P-SHGcorr\"])\n mask2 = ~np.isnan(calc_Ipss) & ~np.isnan(subdf[\"S-SHGcorr\"])\n mask3 = ~np.isnan(calc_Fpp) & ~np.isnan(subdf[\"P-FLcorr\"])\n mask4 = ~np.isnan(calc_Fss) & ~np.isnan(subdf[\"S-FLcorr\"])\n\n m1, b1, rval1, pval1, m_err1 = linregress(\n calc_Ippp[mask1], subdf[\"P-SHGcorr\"][mask1]\n )\n report1 = txtboxfmt.format(rval1, 100.0 * m_err1 / m1)\n\n m2, b2, rval2, pval2, m_err2 = linregress(\n calc_Ipss[mask2], subdf[\"S-SHGcorr\"][mask2]\n )\n report2 = txtboxfmt.format(rval2, 100.0 * m_err2 / m2)\n\n m3, b3, rval3, pval3, m_err3 = linregress(\n calc_Fpp[mask3], subdf[\"P-FLcorr\"][mask3]\n )\n report3 = txtboxfmt.format(rval3, 100.0 * m_err3 / m3)\n\n m4, b4, rval4, pval4, m_err4 = linregress(\n calc_Fss[mask4], subdf[\"S-FLcorr\"][mask4]\n )\n report4 = txtboxfmt.format(rval4, 100.0 * m_err4 / m4)\n\n # plot the xy-data\n ax[0, 0].plot(calc_Ippp, subdf[\"P-SHGcorr\"], \".\")\n ax[0, 1].plot(calc_Ipss, subdf[\"S-SHGcorr\"], \".\")\n ax[1, 0].plot(calc_Fpp, subdf[\"P-FLcorr\"], \".\")\n ax[1, 1].plot(calc_Fss, subdf[\"S-FLcorr\"], \".\")\n # plot the linear regression\n ax[0, 0].plot(calc_Ippp, calc_Ippp * m1 + b1, \"k-\")\n ax[0, 1].plot(calc_Ipss, calc_Ipss * m2 + b2, \"k-\")\n ax[1, 0].plot(calc_Fpp, calc_Fpp * m3 + b3, \"k-\")\n ax[1, 1].plot(calc_Fss, calc_Fss * m4 + b4, \"k-\")\n\n # plot annotations\n ax[0, 0].text(\n 0.05, 0.08, report1, bbox=boxprop, transform=ax[0, 0].transAxes\n )\n ax[0, 0].set_xlabel(r\"calculated $I_{ppp}$\")\n ax[0, 0].set_ylabel(r\"observed $I_{ppp}$\")\n\n ax[0, 1].text(\n 0.05, 0.08, report2, bbox=boxprop, transform=ax[0, 1].transAxes\n )\n\n ax[0, 
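One hazard in `eval_scores` above: when `model.optres.errorbars` is false, the `else` branch sets `Kd_val` and `Kd_CV` but never `Kd_err`, so the later `"StdErr(Kd)": Kd_err` lookup raises `NameError` on the first failed fit. A defensive extraction helper, with the lmfit-style result object stubbed out for illustration:

```python
# Guarded Kd extraction; sentinel defaults cover failed error bars.
from types import SimpleNamespace

def extract_kd(optres, sentinel=1e6):
    """Return (value, stderr, cv), falling back to sentinels."""
    if not optres.errorbars:
        return sentinel, sentinel, sentinel
    kd = optres.params["Kd"]
    return kd.value, kd.stderr, kd.stderr / kd.value

ok = SimpleNamespace(errorbars=True,
                     params={"Kd": SimpleNamespace(value=2.0, stderr=0.4)})
bad = SimpleNamespace(errorbars=False, params={})
print(extract_kd(ok))    # (2.0, 0.4, 0.2)
print(extract_kd(bad))   # (1000000.0, 1000000.0, 1000000.0)
```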
1].set_xlabel(r\"calculated $I_{pss}$\")\n ax[0, 1].set_ylabel(r\"observed $I_{pss}$\")\n\n ax[1, 0].text(\n 0.05, 0.08, report3, bbox=boxprop, transform=ax[1, 0].transAxes\n )\n\n ax[1, 0].set_xlabel(r\"calculated $F_{pp}$\")\n ax[1, 0].set_ylabel(r\"observed $F_{pp}$\")\n\n ax[1, 1].text(\n 0.05, 0.08, report4, bbox=boxprop, transform=ax[1, 1].transAxes\n )\n\n ax[1, 1].set_xlabel(r\"calculated $F_{ss}$\")\n ax[1, 1].set_ylabel(r\"observed $F_{ss}$\")\n\n mpdf.savefig()\n plt.close()\n", "sub_path": "src/main/python/PyBiodesy/Pipeline.py", "file_name": "Pipeline.py", "file_ext": "py", "file_size_in_byte": 29170, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pandas.Series", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 88, "usage_type": "attribute"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 167, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve.Hyperbolic", "line_number": 171, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve", "line_number": 171, "usage_type": "name"}, {"api_name": "PyBiodesy.Fitting.BindingCurve.Hyperbolic", "line_number": 180, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve", "line_number": 180, "usage_type": "name"}, {"api_name": "PyBiodesy.Fitting.logspace", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 295, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve.Hyperbolic", "line_number": 301, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve", "line_number": 301, "usage_type": "name"}, {"api_name": "PyBiodesy.Fitting.BindingCurve.Hyperbolic", "line_number": 310, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve", "line_number": 310, "usage_type": "name"}, {"api_name": "PyBiodesy.Fitting.logspace", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 332, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.close", "line_number": 356, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 356, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 410, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 411, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 415, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 430, "usage_type": "call"}, {"api_name": "numpy.pi", 
"line_number": 430, "usage_type": "attribute"}, {"api_name": "PyBiodesy.integrals.E", "line_number": 431, "usage_type": "call"}, {"api_name": "PyBiodesy.integrals.p_SHG_fun", "line_number": 431, "usage_type": "argument"}, {"api_name": "PyBiodesy.integrals.deg2rad", "line_number": 431, "usage_type": "call"}, {"api_name": "PyBiodesy.integrals.E", "line_number": 432, "usage_type": "call"}, {"api_name": "PyBiodesy.integrals.s_SHG_fun", "line_number": 432, "usage_type": "argument"}, {"api_name": "PyBiodesy.integrals.deg2rad", "line_number": 432, "usage_type": "call"}, {"api_name": "PyBiodesy.integrals.E", "line_number": 433, "usage_type": "call"}, {"api_name": "PyBiodesy.integrals.p_TPF_fun", "line_number": 433, "usage_type": "argument"}, {"api_name": "PyBiodesy.integrals.deg2rad", "line_number": 433, "usage_type": "call"}, {"api_name": "PyBiodesy.integrals.E", "line_number": 434, "usage_type": "call"}, {"api_name": "PyBiodesy.integrals.s_TPF_fun", "line_number": 434, "usage_type": "argument"}, {"api_name": "PyBiodesy.integrals.deg2rad", "line_number": 434, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 463, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve.Hyperbolic", "line_number": 506, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve", "line_number": 506, "usage_type": "name"}, {"api_name": "PyBiodesy.Fitting.BindingCurve.Hyperbolic", "line_number": 509, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve", "line_number": 509, "usage_type": "name"}, {"api_name": "PyBiodesy.Fitting.BindingCurve.Hyperbolic", "line_number": 512, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve", "line_number": 512, "usage_type": "name"}, {"api_name": "PyBiodesy.Fitting.BindingCurve.Hyperbolic", "line_number": 515, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve", "line_number": 515, "usage_type": "name"}, {"api_name": "PyBiodesy.Fitting.BindingCurve.Hyperbolic", "line_number": 547, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve", "line_number": 547, "usage_type": "name"}, {"api_name": "PyBiodesy.Fitting.BindingCurve.Hyperbolic", "line_number": 549, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve", "line_number": 549, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 562, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 563, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 564, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 565, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 574, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 579, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 581, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 582, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 583, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 584, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 586, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 589, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 592, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 595, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 607, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 611, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 612, "usage_type": 
"call"}, {"api_name": "numpy.exp", "line_number": 613, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 614, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 619, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 621, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 650, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 707, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 708, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 710, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 719, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 725, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve.Hyperbolic", "line_number": 732, "usage_type": "call"}, {"api_name": "PyBiodesy.Fitting.BindingCurve", "line_number": 732, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 749, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 761, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 761, "usage_type": "attribute"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 769, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 773, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 773, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 781, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 782, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 783, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 784, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 787, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 794, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 796, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 797, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 798, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 799, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 801, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 806, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 811, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 816, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 861, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 861, "usage_type": "name"}]} +{"seq_id": "105737715", "text": "from django.shortcuts import render, redirect, reverse\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\n@login_required\ndef view_cart(request):\n \"\"\"A View that renders the cart contents page\"\"\"\n return render(request, \"cart.html\")\n\n@login_required\ndef add_to_cart(request, id):\n \"\"\"Add a donation for a feature to the cart\"\"\"\n \n donation = int(request.POST.get('donation'))\n cart = request.session.get('cart', {})\n if id in cart:\n cart[id] = cart[id] + donation\n else:\n cart[id] = cart.get(id, donation)\n\n request.session['cart'] = cart\n messages.success(request, f'Donation added, ready for checkout')\n return redirect(reverse('get_features'))\n\n@login_required\ndef delete_from_cart(request, id):\n \"\"\"\n Deletes the donation from the cart\n \"\"\"\n cart = request.session.get('cart', {})\n\n cart.pop(id)\n \n 
request.session['cart'] = cart\n return redirect(reverse('view_cart'))", "sub_path": "cart/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 987, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.shortcuts.render", "line_number": 8, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 5, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 10, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 35, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "610887861", "text": "from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User, Group\nfrom django.contrib import auth, messages\nfrom django.db import transaction\nfrom django.http import HttpResponseRedirect\nfrom django import forms\nfrom django.shortcuts import render, redirect, get_object_or_404\n\nfrom accounts.forms import UserModelForm, ProfileModelForm, LoginForm\n\n@login_required\n@transaction.atomic\ndef update_profile(request):\n print('update_profile() request.method',request.method)\n context = {}\n if request.method == 'POST':\n user_form = UserModelForm(request.POST, instance=request.user)\n profile_form = ProfileModelForm(request.POST, instance=request.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n messages.success(request, ('Your profile was successfully updated!'))\n return redirect('account_profile')\n else:\n print()\n print('error, user_form',user_form.cleaned_data)\n print('error, profile_form',profile_form.cleaned_data)\n messages.error(request, ('Please correct the error below.'))\n else:\n user_form = UserModelForm(instance=request.user)\n profile_form = ProfileModelForm(instance=request.user.profile)\n id_ = request.user.id\n u = get_object_or_404(User, id=id_)\n context['projects'] = 'datasets I own or collaborate on'\n context['comments'] = 'get comments associated with projects I own'\n context['groups'] = u.groups.values_list('name',flat=True)\n\n return render(request, 'accounts/profile.html', {\n 'user_form': user_form,\n 'profile_form': profile_form,\n 'context': context\n })\n\ndef register(request):\n if request.method == 'POST':\n if request.POST['password1'] == request.POST['password2']:\n try:\n User.objects.get(username=request.POST['username'])\n return render(request, 'accounts/register.html', {'error': 'User name is already taken'})\n except User.DoesNotExist:\n #print('request.POST',request.POST)\n user = User.objects.create_user(\n request.POST['username'], \n password=request.POST['password1'],\n email=request.POST['email'],\n first_name=request.POST['first_name'],\n last_name=request.POST['last_name']\n )\n user.profile.affiliation=request.POST['affiliation']\n #user.profile.user_type=request.POST['user_type']\n user.profile.name=request.POST['name']\n 
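Stepping back to the cart record above: the session cart is just a dict keyed by feature id that accumulates donation amounts, and the if/else collapses to a single `dict.get`. A plain-Python sketch of that logic:

```python
# Session-cart accumulation reduced to its dict core.
def add_to_cart(cart: dict, feature_id: str, donation: int) -> dict:
    cart[feature_id] = cart.get(feature_id, 0) + donation
    return cart

cart = {}
add_to_cart(cart, "42", 10)
add_to_cart(cart, "42", 5)
print(cart)   # {'42': 15}
```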
auth.login(request, user)\n return redirect('home')\n else:\n return render(request, 'accounts/register.html', {'error': 'Sorry, password mismatch!'})\n else:\n return render(request, 'accounts/register.html')\n\ndef login(request):\n if request.method == 'POST':\n user = auth.authenticate(username=request.POST['username'],password=request.POST['password'])\n\n if user is not None:\n auth.login(request,user)\n return redirect('dashboard')\n else:\n raise forms.ValidationError(\"Sorry, that login was invalid. Please try again.\")\n #return redirect('home', {'error': 'username or password is incorrect :^('})\n else:\n return render(request, 'accounts/login.html')\n\ndef login_view(request):\n form = LoginForm(request.POST or None)\n if request.POST and form.is_valid():\n user = form.login(request)\n if user:\n login(request, user)\n return HttpResponseRedirect(\"/\")# Redirect to a success page.\n return render(request, 'accounts/login.html', {'login_form': form })\n\ndef logout(request):\n if request.method == 'POST':\n auth.logout(request)\n return redirect('home')\n\n", "sub_path": "accounts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "accounts.forms.UserModelForm", "line_number": 17, "usage_type": "call"}, {"api_name": "accounts.forms.ProfileModelForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 28, "usage_type": "name"}, {"api_name": "accounts.forms.UserModelForm", "line_number": 30, "usage_type": "call"}, {"api_name": "accounts.forms.ProfileModelForm", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 33, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 48, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 50, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 52, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 52, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", 
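Two hazards in the accounts record above: `register()` sets `user.profile.*` fields but never calls `user.profile.save()`, and `login_view()` calls `login(request, user)`, which resolves to the module's own one-argument `login` view (the later `def login(request)` shadows the imported `django.contrib.auth.login`) and so raises `TypeError`. Aliasing the auth helper sidesteps the collision; a sketch assuming the same `LoginForm` as the record:

```python
# login_view with the auth helper aliased past the view-name collision.
from django.contrib.auth import login as auth_login
from django.http import HttpResponseRedirect
from django.shortcuts import render

from accounts.forms import LoginForm

def login_view(request):
    form = LoginForm(request.POST or None)
    if request.POST and form.is_valid():
        user = form.login(request)
        if user:
            auth_login(request, user)   # the helper, not the view
            return HttpResponseRedirect("/")
    return render(request, 'accounts/login.html', {'login_form': form})
```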
"line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 62, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 71, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 71, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 74, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 75, "usage_type": "call"}, {"api_name": "django.forms.ValidationError", "line_number": 77, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 77, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 80, "usage_type": "call"}, {"api_name": "accounts.forms.LoginForm", "line_number": 83, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 88, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 89, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 93, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 93, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "409175898", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom tabulate import tabulate\n\nimport TimedTrie\nimport TrieNode\nfrom Utility import Util\n\n\nclass EventPredictor:\n\n def __init__(self, params, timedTrie: TrieNode, timed_trace, timed_trie_model: TimedTrie):\n self.VARIABILITY_OF_LOOKBACK = getattr(params, 'variable_lookback', True)\n self.PLOT_PROBABILITY_ALPBHABET_MAX_LENGTH = getattr(params, 'ignore_event_prob_plot_len_above',\n 15); # Plot fails for large alpbhabet size\n\n self.timed_trie_model = timed_trie_model\n self.timed_trace = timed_trace # the entire trace\n self.Alphabets = sorted(list(set(self.timed_trace[0])))\n self.timedTrie = timedTrie\n\n def evaluate_time_probability_using_count(self):\n for index, val in enumerate(self.time_probability):\n _count_list_total = sum(val)\n if _count_list_total > 0:\n _prob_list = [round(float(e / _count_list_total), 2) for e in val]\n self.time_probability[index] = _prob_list\n\n def fill_time_probability_mat(self, root: TrieNode, sub_sub_trace):\n pos = 0;\n sub_sub_trace_timeShifted = Util.getTimeShift(sub_sub_trace[1])\n\n while pos < len(sub_sub_trace[0]):\n _e, _t = sub_sub_trace[0][pos], sub_sub_trace_timeShifted[pos]\n for _child in root.children:\n if _child.char == _e and _child.t_min <= _t <= _child.t_max:\n root = _child\n if pos == len(sub_sub_trace[0]) - 1:\n # Now fill the\n # for ___child in _child.children:\n for time_step in list(range(len(self.time_probability))):\n for __child in _child.children:\n all_time_transitions = __child.t_list\n _pos_of_char_in_Alphabet = self.Alphabets.index(__child.char)\n _count = len(list(filter(lambda _x: _x == time_step + 1, all_time_transitions)))\n self.time_probability[time_step][_pos_of_char_in_Alphabet] += _count\n\n break; # break for\n\n pos += 1\n\n return\n\n def fill_time_probability_mat_on_lookBack(self, sub_sub_trace, variable_lookback: bool = True):\n\n subTrace_len = len(sub_sub_trace[0])\n root = 
self.timedTrie\n\n for _k in list(range(subTrace_len, 0, -1)):\n if variable_lookback == False and _k < subTrace_len:\n break;\n\n sub_sub_trace_event = sub_sub_trace[0][-_k:]\n sub_sub_trace_time = sub_sub_trace[1][-_k:]\n\n sub_sub_trace_timeEvent = (sub_sub_trace_event, sub_sub_trace_time)\n\n if self.timed_trie_model.ENABLE_DEBUG:\n print(\"k \", str(_k), \" filling --- \", sub_sub_trace_timeEvent)\n\n self.fill_time_probability_mat(root, sub_sub_trace_timeEvent)\n\n if self.timed_trie_model.ENABLE_DEBUG:\n print(\"Count list \", self.time_probability)\n self.evaluate_time_probability_using_count()\n\n def predict(self, subtrace_timed):\n self.tp_max = Util.get_time_length_based_on_lookback(self.timedTrie, subtrace_timed, self.VARIABILITY_OF_LOOKBACK) # Look into this\n self.time_probability = np.zeros((self.tp_max, len(self.Alphabets)))\n\n self.fill_time_probability_mat_on_lookBack(subtrace_timed)\n\n time_probability_DF = pd.DataFrame(self.time_probability, columns=self.Alphabets, index=list(range(1, self.tp_max + 1)))\n time_probability_DF.index.name = \"Time (t)\"\n print(tabulate(time_probability_DF, headers='keys', tablefmt='psql'))\n\n \"\"\"#Plot Area Timed_Area graph\"\"\"\n\n def plot_time_probability(self, save_figure=False):\n\n if len(self.Alphabets) > self.PLOT_PROBABILITY_ALPBHABET_MAX_LENGTH:\n print(\"Skipping plotting of probability graph since alphabet size more than {0}\".format(\n self.timedTrie.PLOT_PROBABILITY_ALPBHABET_MAX_LENGTH))\n else:\n y = np.vstack(self.time_probability)\n x = np.arange(len(self.Alphabets)) # label location\n # barWidth = 0.2\n barWidth = float(1.0 / (2 * len(self.Alphabets)))\n\n fig, ax = plt.subplots()\n ax.set_ylabel('Probability')\n ax.set_xlabel(\"Time (sec)\")\n ax.set_title('Probablity of Event transition over time')\n\n r = np.arange(len(y.T[0]))\n for idx, val in enumerate(y.T):\n _label = self.Alphabets[idx]\n plt.bar(r, val, width=barWidth, label=_label)\n\n # Set position of bar on X axis\n r = [x + barWidth for x in r]\n\n plt.xticks([r + barWidth for r in range(len(y))], list(range(1, len(self.time_probability) + 1)))\n plt.legend()\n\n if save_figure == True:\n plt.savefig(\"./Result/event_prediction_plot.png\")\n\n plt.show()\n\n", "sub_path": "Predictor/EventPredictor.py", "file_name": "EventPredictor.py", "file_ext": "py", "file_size_in_byte": 5068, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "Utility.Util.getTimeShift", "line_number": 32, "usage_type": "call"}, {"api_name": "Utility.Util", "line_number": 32, "usage_type": "name"}, {"api_name": "Utility.Util.get_time_length_based_on_lookback", "line_number": 79, "usage_type": "call"}, {"api_name": "Utility.Util", "line_number": 79, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 84, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": 
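`evaluate_time_probability_using_count` normalises each row of the count matrix into a probability distribution, leaving all-zero rows untouched. The same step vectorised with numpy:

```python
# Row-wise count -> probability normalisation, zero rows preserved.
import numpy as np

counts = np.array([[3.0, 1.0, 0.0],
                   [0.0, 0.0, 0.0],   # no transitions at this step
                   [2.0, 2.0, 4.0]])

totals = counts.sum(axis=1, keepdims=True)
probs = np.divide(counts, totals,
                  out=np.zeros_like(counts), where=totals > 0)
print(probs.round(2))
```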
"name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}]} +{"seq_id": "625683516", "text": "# Imports Libraries\r\nimport pygame\r\nimport sys\r\nfrom pygame.locals import *\r\n\r\nWidth = 1600\r\nHeight = 900\r\n\r\n\r\n# Initialise Pygame\r\npygame.init()\r\n\r\n# declare variable 'screen' and set the display window\r\nscreen = pygame.display.set_mode((Width, Height))\r\n\r\n\r\n# Declare variable for image and load it into pygame\r\nBeach = pygame.image.load('Beach.jpg')\r\nscreen.blit(Beach, (0, 0))\r\npygame.display.flip()\r\nPixArray = pygame.PixelArray(screen)\r\n\r\ndef Blackandwhite():\r\n \"\"\" Turns image to black and white\"\"\"\r\n for Y in range(0, Height):\r\n for X in range(0, Width):\r\n\r\n # Gets the RGB Values of the pixel\r\n red = screen.get_at((X, Y)).r\r\n green = screen.get_at((X, Y)).g\r\n blue = screen.get_at((X, Y)).b\r\n if red + green + blue > 200:\r\n red = 0\r\n green = 0\r\n blue = 0\r\n else:\r\n red = 255\r\n green = 255\r\n blue = 255\r\n\r\n # Updates the Pixel Array\r\n PixArray[X, Y] = (red, green, blue)\r\n\r\n\r\nBlackandwhite()\r\n\r\n# Update the full display surface to the screen\r\npygame.display.flip()\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()", "sub_path": "MainResubmission.py", "file_name": "MainResubmission.py", "file_ext": "py", "file_size_in_byte": 1301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pygame.init", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.PixelArray", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "494765064", "text": "from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth.models import User\nimport requests, json\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib import messages\nfrom video.models import Torrent\nfrom 
django.utils.html import escape\nimport sys, webbrowser, bs4\nimport time, urllib.request\nfrom .forms import CommentForm\nfrom django.http import HttpRequest, JsonResponse\nfrom django.db.models import Q, query\nfrom users.models import Profile\nimport uuid\n\ndef list(request):\n #get token to 42\n if not 'code' in request.GET and int(request.user.is_authenticated) == 0:\n return redirect('login')\n if request.GET.get('code'):\n code = request.GET.get('code')\n data = {'grant_type': 'authorization_code', 'client_id': '5123688fe53d089acd8fb1f9bf1bd437e8d4f3628dc5d79b033b357deafbb01a', 'client_secret': '3ede04d9a435c18f69ae2c9e0e91fc486c38349327418f49e7b6910da663903b', 'code': code, 'redirect_uri': 'http://localhost:8000'}\n access_token_response = requests.post(\"https://api.intra.42.fr/oauth/token\", data=data)\n access = json.loads(access_token_response.text)\n #data user access\n headers = {\"Authorization\": 'Bearer ' + str(access['access_token'])}\n req = requests.post('http://api.intra.42.fr/v2/me', headers=headers)\n content = json.loads(req.text)\n if content and 'id' in content:\n if not User.objects.filter(username=content['login']):\n user = User.objects.create_user(content['login'], email=content['email'], password=\"\", first_name=content['first_name'], last_name=content['last_name'])\n token = Token.objects.create(user=user)\n #log user with token\n else:\n user = User.objects.get(username=content['login'])\n userlog = authenticate(request, username=content['login'], password='')\n if userlog is not None:\n login(request, user, backend='django.contrib.auth.backends.ModelBackend')\n else:\n messages.error(request, f'Bad Response HA HA HA')\n return redirect('login')\n return redirect('list')\n inshalah = request.POST.get('tok')\n if request.method == 'POST' and 'tok' in request.POST:\n name = request.user\n id_profile = User.objects.get(username=name).pk\n token = Profile.objects.get(id=id_profile)\n token.token = ''\n token.save()\n req = request.POST.get('val')\n if req :\n filters = req.split(' ')\n if not '|' in filters :\n if filters[0] == 'release' or filters[0] == 'rate':\n filters[0] = '-' + filters[0]\n movies = Torrent.objects.filter(category__contains=filters[1], rate__range=[filters[2], filters[3]], release__range=[filters[4], filters[5]]).order_by(filters[0]).values_list()[::1] \n else: \n movies = Torrent.objects.filter(category__contains=filters[1], rate__range=[filters[2], filters[3]], release__range=[filters[4], filters[5]]).order_by(filters[0]).values_list()[::1] \n elif filters[1] == '|':\n if filters[0] == 'release' or filters[0] == 'rate':\n filters[0] = '-' + filters[0]\n movies = Torrent.objects.filter(rate__range=[filters[2], filters[3]], release__range=[filters[4], filters[5]]).order_by(filters[0]).values_list()[::1] \n else: \n movies = Torrent.objects.filter(rate__range=[filters[2], filters[3]], release__range=[filters[4], filters[5]]).order_by(filters[0]).values_list()[::1] \n return JsonResponse({'movie' : movies})\n else:\n movies = Torrent.objects.order_by('-rate').values()\n genre = []\n tab = []\n name = request.user\n id_profile = User.objects.get(username=name).pk\n bd_already = Profile.objects.filter(id=id_profile).first()\n trysplit = bd_already.already.split()\n for notes in movies:\n cat = notes['category']\n id_gris = notes['id']\n id_gris = str(id_gris)\n for aff in trysplit:\n if (aff == id_gris):\n tab.append(int(aff))\n if cat:\n cat = json.loads(notes['category'])\n for c in cat:\n if not c in genre:\n genre.append(c)\n return 
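The `list` view's 42-intranet login is a standard OAuth2 authorization-code exchange. The endpoint below comes from the record; the credentials are placeholders, and real secrets belong in settings or the environment rather than hard-coded in the view:

```python
# Authorization-code -> access-token exchange against the 42 API.
import requests

def exchange_code(code: str) -> dict:
    data = {
        "grant_type": "authorization_code",
        "client_id": "CLIENT_ID",            # placeholder
        "client_secret": "CLIENT_SECRET",    # placeholder
        "code": code,
        "redirect_uri": "http://localhost:8000",
    }
    resp = requests.post("https://api.intra.42.fr/oauth/token", data=data)
    resp.raise_for_status()                  # surfaces a failed exchange
    return resp.json()                       # holds 'access_token'
```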
render(request, 'video/list.html', {'movies' : movies, 'genre' : genre, 'grey' : tab})\n\n@login_required\ndef search(request):\n inshalah = request.POST.get('tok')\n if request.method == 'POST' and 'tok' in request.POST:\n name = request.user\n id_profile = User.objects.get(username=name).pk\n token = Profile.objects.get(id=id_profile)\n token.token = ''\n token.save()\n if request.method == 'POST' and not 'val' in request.POST:\n film_search = request.POST[\"film_search\"]\n film_search = escape(film_search)\n film_search = film_search.replace(' ', '%20')\n film_search = film_search.strip()\n address = 'https://yts.lt/browse-movies/' + film_search + '/all/all/0/latest'\n result = requests.get(address)\n try :\n result.raise_for_status()\n except Exception as exc:\n print('There was a problem: %s' %(exc))\n poupou = bs4.BeautifulSoup(result.text, features=\"lxml\")\n section = poupou.find('section')\n film = [] \n row = section.find('div', class_='row')\n div = row.find_all('div', class_='browse-movie-wrap')\n for di in div:\n a = di.find('a', class_='browse-movie-link')\n img = di.find('img', class_='img-responsive')\n img_alt = img['alt']\n img_alt = img_alt.replace(' download', '')\n film.append([a['href'], img_alt, img['src']])\n for f in film:\n video = requests.get(f[0])\n try :\n result.raise_for_status()\n except Exception as exc:\n print('There was a problem: %s' %(exc))\n vid = bs4.BeautifulSoup(video.text, features='lxml')\n info = vid.find('div', id='movie-info')\n if (info):\n info2 = info.find_all('h2')\n rate = vid.find('span', itemprop=\"ratingValue\").text\n f.append(rate)\n if not Torrent.objects.filter(name = f[1]):\n torrent = Torrent()\n torrent.name = f[1]\n torrent.miniature = f[2]\n torrent.release = info2[0].text\n category = info2[1].text\n cat = category.split('/')\n torrent.category = json.dumps({'category' : cat})\n imdbid = vid.find('a', title=\"IMDb Rating\")\n imdbid = imdbid['href'].replace('https://www.imdb.com/title/', '')\n imdbid = imdbid.replace('/', '')\n torrent.idimdb = imdbid\n torrent.rate = float(rate)\n torrentlk = vid.find('a', class_='magnet-download')\n torrent.magnets= torrentlk['href']\n syn = vid.find('div', id='synopsis')\n if syn:\n torrent.synopsis = syn.find('p').text\n direct = vid.find('div', id='crew')\n if direct:\n director = direct.find('span', itemprop='name')\n if director:\n torrent.director = director.text\n actor = direct.find('div', class_='actors')\n if actor:\n ac = []\n actors = actor.find_all('span', itemprop='name')\n if actors:\n for act in actors:\n ac.append(act.text)\n torrent.actors = json.dumps({'actors' : ac })\n torrent.serie = False\n torrent.save()\n mov = Torrent.objects.only('id').get(name=f[1]).id\n s = Torrent.objects.only('serie').get(name=f[1]).serie\n f.append(mov)\n f.append(s)\n address = 'https://www.imdb.com/find?q=' + film_search + '&s=tt&ttype=tv&exact=true&ref_=fn_tt_ex'\n result = requests.get(address)\n try :\n result.raise_for_status()\n except Exception as exc:\n print('There was a problem: %s' %(exc))\n poupou = bs4.BeautifulSoup(result.text, features=\"lxml\")\n prou = poupou.find('table')\n if prou:\n a = prou.find_all('a')\n if (a):\n blink = []\n for b in a:\n add = b['href'].replace('/title', '')\n link = 'https://tv-v2.api-fetch.website/show' + add\n if not link in blink:\n blink.append(link)\n for l in blink:\n serie = requests.get(l).json()\n if serie and not Torrent.objects.filter(name = serie['title'] + ' (' + serie['year'] + ')') and not serie['title'] + ' (' + serie['year'] + 
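`search()` scrapes yts-style listing markup with BeautifulSoup. A runnable miniature with inline HTML showing the same link/title/poster extraction (`html.parser` stands in for the record's lxml backend):

```python
# Extracting link/title/poster triples from browse-movie markup.
import bs4

html = """
<section><div class="row">
  <div class="browse-movie-wrap">
    <a class="browse-movie-link" href="/movies/example-2020"></a>
    <img class="img-responsive" alt="Example download" src="/ex.jpg"/>
  </div>
</div></section>
"""

soup = bs4.BeautifulSoup(html, "html.parser")
for wrap in soup.select("div.browse-movie-wrap"):
    link = wrap.find("a", class_="browse-movie-link")["href"]
    img = wrap.find("img", class_="img-responsive")
    title = img["alt"].replace(" download", "")
    print(link, title, img["src"])
```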
')' in film:\n torrent = Torrent()\n torrent.name = serie['title'] + ' (' + serie['year'] + ')'\n if 'images' in serie and 'banner' in serie['images']:\n torrent.miniature = serie['images']['banner']\n torrent.magnets = json.dumps(serie['episodes'])\n torrent.release = serie['year']\n torrent.category = json.dumps({'category' : serie['genres']})\n torrent.rate = float(serie['rating']['percentage'] * 10 / 100)\n torrent.synopsis = serie['synopsis']\n torrent.movie_length = serie['runtime']\n torrent.serie = True\n torrent.episodes = json.dumps(serie['episodes'])\n torrent.idimdb = serie['_id']\n torrent.seasons = serie['num_seasons']\n torrent.save()\n mov = Torrent.objects.only('id').get(name = serie['title'] + ' (' + serie['year'] + ')').id\n film.append(['rien', torrent.name, torrent.miniature, torrent.rate, mov, torrent.serie])\n elif serie:\n name = serie['title'] + ' (' + serie['year'] + ')'\n if not name in film:\n miniature = serie['images']['banner']\n rate = float(serie['rating']['percentage'] * 10 / 100)\n mov = Torrent.objects.only('id').get(name = name).id\n film.append(['rien', name, miniature, rate, mov, True])\n genre = []\n search = []\n for notes in film:\n toto = Torrent.objects.get(name=notes[1])\n search.append(toto.pk)\n cat = json.loads(toto.category)\n ca = cat['category']\n for c in ca:\n if not c in genre:\n genre.append(c)\n request.session['search'] = search\n req = request.POST.get('val')\n if req :\n filters = req.split(' ')\n if not '|' in filters :\n if filters[0] == 'release' or filters[0] == 'rate':\n filters[0] = '-' + filters[0]\n movies = Torrent.objects.filter(pk__in=request.session['search'], category__contains=filters[1], rate__range=[filters[2], filters[3]], release__range=[filters[4], filters[5]]).order_by(filters[0]).values_list()[::1] \n else: \n movies = Torrent.objects.filter(pk__in=request.session['search'], category__contains=filters[1], rate__range=[filters[2], filters[3]], release__range=[filters[4], filters[5]]).order_by(filters[0]).values_list()[::1] \n elif filters[1] == '|':\n if filters[0] == 'release' or filters[0] == 'rate':\n filters[0] = '-' + filters[0]\n movies = Torrent.objects.filter(pk__in=request.session['search'], rate__range=[filters[2], filters[3]], release__range=[filters[4], filters[5]]).order_by(filters[0]).values_list()[::1] \n else:\n movies = Torrent.objects.filter(pk__in=request.session['search'], rate__range=[filters[2], filters[3]], release__range=[filters[4], filters[5]]).order_by(filters[0]).values_list()[::1] \n return JsonResponse({'movie' : movies})\n else: \n return render(request, 'video/list.html', {'film' : film, 'genre' : genre})\n\n@login_required\ndef video(request, tittle):\n try:\n req = request.POST.get('pal')\n if request.method == 'POST' and 'pal' in request.POST:\n name = request.user\n id_profile = User.objects.get(username=name).pk\n token = Profile.objects.get(id=id_profile)\n token.token = uuid.uuid4().hex[:12].upper()\n token.save()\n bd_already = Profile.objects.filter(id=id_profile).first()\n toto = bd_already.already\n tata = bd_already.already.strip().split(' ')\n if tittle not in tata:\n bd_already.already = bd_already.already + tittle + ' '\n bd_already.save()\n ids = Torrent.objects.get(pk=tittle).idimdb\n lang = request.user.pk\n toto = Profile.objects.only('language').get(id=lang).language\n inf = {'ids' : ids, 'lan' : toto, 'token' : token.token}\n return JsonResponse({'infos' : inf })\n grey = 0\n inshalah = request.POST.get('tok')\n if request.method == 'POST' and 'tok' in 
request.POST:\n name = request.user\n id_profile = User.objects.get(username=name).pk\n token = Profile.objects.get(id=id_profile)\n token.token = ''\n token.save()\n if request.method == 'POST' and not 'pal' in request.POST:\n comment_form = CommentForm(request.POST)\n data = comment_form.data['comment']\n if len(data) < 200:\n movie = Torrent.objects.filter(id=tittle).first()\n if not movie.comments:\n movie.comments = json.dumps({'comments': []})\n movie.save()\n tmp = json.loads(movie.comments)\n tmp['comments'].append(request.user.username)\n tmp['comments'].append(data)\n movie.comments = json.dumps(tmp)\n movie.save()\n else:\n comment_form = CommentForm()\n except:\n pass\n try:\n id_film = Torrent.objects.filter(id=tittle).first().name\n miniature = Torrent.objects.filter(id=tittle).first().miniature\n annee = Torrent.objects.filter(id=tittle).first().release\n synopsis = Torrent.objects.filter(id=tittle).first().synopsis\n category = Torrent.objects.filter(id=tittle).first().category\n notes = Torrent.objects.filter(id=tittle).first().rate\n actors = Torrent.objects.filter(id=tittle).first().actors\n act = actors[actors.find('[')+1:actors.find(']')]\n actors = act.replace('\"', '')\n actors = actors.replace(',', ' / ')\n name = request.user\n id_profile = User.objects.get(username=name).pk\n bd_already = Profile.objects.filter(id=id_profile).first()\n trysplit = bd_already.already.split()\n for aff in trysplit:\n if (aff == tittle):\n grey = 1\n if category and ('{' in category or '[' in category):\n category = category[category.find('[')+1:category.find(']')]\n category = category.replace('\"', '')\n category = category.replace(' ,', ' / ')\n context = {\n 'form': comment_form,\n 'grey' : grey,\n 'titre' : id_film,\n 'miniature' : miniature,\n 'annee' : annee,\n 'synopsis' : synopsis,\n 'category' : category,\n 'notes' : notes,\n 'acteurs' : actors\n }\n coms = Torrent.objects.filter(id=tittle).first()\n tmp = None\n if coms.comments:\n tmp = json.loads(coms.comments)\n if tmp:\n comments = []\n authors = []\n id_authors = []\n i = 0\n while i < len(tmp['comments']):\n if i % 2:\n comments.append(tmp['comments'][i])\n else:\n authors.append(tmp['comments'][i])\n id_authors.append(User.objects.filter(username=tmp['comments'][i]).first().id)\n i += 1\n mylist = zip(authors, comments, id_authors)\n context_1 = {\n 'form': comment_form,\n 'comments': mylist,\n 'grey' : grey,\n 'titre' : id_film,\n 'miniature' : miniature,\n 'annee' : annee,\n 'synopsis' : synopsis,\n 'category' : category,\n 'notes' : notes,\n 'acteurs' : actors\n }\n return render(request, 'video/video.html', context_1)\n return render(request, 'video/video.html', context)\n except AttributeError:\n return (redirect('/'))\n\n@login_required\ndef serie(request, title):\n try:\n seasons = Torrent.objects.get(pk=title).serie\n except :\n return redirect('/')\n grey = 0\n count = request.POST.get('yeah')\n if (request.method == 'POST' and 'pal' in request.POST):\n name = request.user\n id_profile = User.objects.get(username=name).pk\n bd_already = Profile.objects.filter(id=id_profile).first()\n toto = bd_already.already\n tata = bd_already.already.strip().split(' ')\n if title not in tata:\n bd_already.already = bd_already.already + title + ' '\n bd_already.save()\n lang = request.user.pk\n name = request.user\n id_profile = User.objects.get(username=name).pk\n token = Profile.objects.get(id=id_profile)\n token.token = uuid.uuid4().hex[:12].upper()\n token.save()\n toto = 
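`video()` persists comments as one JSON blob whose `comments` list alternates author and text, then unzips it with even/odd slicing when rendering. The round-trip in plain Python:

```python
# Append to and unpack the alternating author/text comment blob.
import json

blob = json.dumps({"comments": []})          # empty-field default
data = json.loads(blob)
data["comments"] += ["alice", "great movie"]
blob = json.dumps(data)

pairs = json.loads(blob)["comments"]
authors, texts = pairs[::2], pairs[1::2]
print(list(zip(authors, texts)))             # [('alice', 'great movie')]
```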
Profile.objects.only('language').get(id=lang).language\n torrent = Torrent.objects.get(pk=title)\n infos = json.loads(torrent.episodes)\n infos = sorted(infos, key=lambda e: e['episode'])\n name = request.user\n id_profile = User.objects.get(username=name).pk\n bd_already = Profile.objects.filter(id=id_profile).first()\n trysplit = bd_already.already.split()\n for aff in trysplit:\n if (aff == title):\n grey = 1\n saison = []\n for p in infos:\n if not p['season'] in saison:\n saison.append(p['season'])\n saison.sort()\n cat = json.loads(torrent.category)\n cat = cat['category']\n if request.method == 'POST' and 'tok' in request.POST:\n name = request.user\n id_profile = User.objects.get(username=name).pk\n token = Profile.objects.get(id=id_profile)\n token.token = ''\n token.save()\n return render(request, 'video/serie.html', {'tok': token.token, 'toto' : toto, 'infos' : infos, 'season' : saison, 'title' : title, 'torrent' : torrent, 'cat' : cat, 'grey' : grey,})\n\n@login_required\ndef watch_serie(request, title, season, episode):\n return render(request, 'video/video.html')\n\n@login_required\ndef filtre(requests):\n if requests.method == 'POST':\n print(requests.POST.getlist('genre1'))\n return render(requests, 'list.html')", "sub_path": "hypertube/video/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 19727, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 25, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 29, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 32, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 32, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 33, "usage_type": "name"}, {"api_name": "rest_framework.authtoken.models.Token.objects.create", "line_number": 34, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.models.Token.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "rest_framework.authtoken.models.Token", "line_number": 34, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 37, "usage_type": "name"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 42, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 42, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 44, "usage_type": 
"call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 48, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.get", "line_number": 49, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 49, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 58, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 58, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 60, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 64, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 64, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 66, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 66, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 67, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects.order_by", "line_number": 69, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 69, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 73, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.filter", "line_number": 74, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 74, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 84, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 88, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 95, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 95, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.get", "line_number": 96, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 96, "usage_type": "name"}, {"api_name": "django.utils.html.escape", "line_number": 101, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 105, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 110, "usage_type": "call"}, {"api_name": "video.models", "line_number": 122, "usage_type": "name"}, 
{"api_name": "requests.get", "line_number": 122, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 127, "usage_type": "call"}, {"api_name": "video.models.text", "line_number": 127, "usage_type": "attribute"}, {"api_name": "video.models", "line_number": 127, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 133, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 133, "usage_type": "name"}, {"api_name": "video.models.Torrent", "line_number": 134, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 140, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 163, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects.only", "line_number": 166, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 166, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 166, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.only", "line_number": 167, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 167, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 171, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 176, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 188, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 189, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 189, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 189, "usage_type": "name"}, {"api_name": "video.models.Torrent", "line_number": 190, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 194, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 196, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 201, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects.only", "line_number": 205, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 205, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 205, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.only", "line_number": 212, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 212, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 212, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.get", "line_number": 217, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 217, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 217, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 219, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 231, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 231, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 231, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 233, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 233, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 233, "usage_type": "name"}, {"api_name": 
"video.models.Torrent.objects.filter", "line_number": 237, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 237, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 237, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 239, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 239, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 239, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 240, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 242, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 90, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 250, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 250, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 250, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.get", "line_number": 251, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 251, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 251, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 252, "usage_type": "call"}, {"api_name": "users.models.Profile.objects.filter", "line_number": 254, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 254, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 254, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.get", "line_number": 260, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 260, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 260, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.only", "line_number": 262, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 262, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 262, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 264, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 269, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 269, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 269, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.get", "line_number": 270, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 270, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 270, "usage_type": "name"}, {"api_name": "forms.CommentForm", "line_number": 274, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 277, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 277, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 277, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 279, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 281, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 284, "usage_type": "call"}, {"api_name": "forms.CommentForm", "line_number": 287, "usage_type": "call"}, {"api_name": 
"video.models.Torrent.objects.filter", "line_number": 291, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 291, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 291, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 292, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 292, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 292, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 293, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 293, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 293, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 294, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 294, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 294, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 295, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 295, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 295, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 296, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 296, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 296, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 297, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 297, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 297, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 302, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 302, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 302, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.filter", "line_number": 303, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 303, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 303, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.filter", "line_number": 323, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 323, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 323, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 326, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 337, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 337, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 337, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 352, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 353, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 355, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 244, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.get", "line_number": 360, "usage_type": "call"}, {"api_name": 
"video.models.Torrent.objects", "line_number": 360, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 360, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 362, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 367, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 367, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 367, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.filter", "line_number": 368, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 368, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 368, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 376, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 376, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 376, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.get", "line_number": 377, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 377, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 377, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 378, "usage_type": "call"}, {"api_name": "users.models.Profile.objects.only", "line_number": 380, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 380, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 380, "usage_type": "name"}, {"api_name": "video.models.Torrent.objects.get", "line_number": 381, "usage_type": "call"}, {"api_name": "video.models.Torrent.objects", "line_number": 381, "usage_type": "attribute"}, {"api_name": "video.models.Torrent", "line_number": 381, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 382, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 385, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 385, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 385, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.filter", "line_number": 386, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 386, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 386, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 396, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 400, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 400, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 400, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.get", "line_number": 401, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 401, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 401, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 404, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 357, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 408, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.decorators.login_required", "line_number": 406, "usage_type": "name"}, {"api_name": "requests.method", "line_number": 412, "usage_type": "attribute"}, {"api_name": "requests.POST.getlist", "line_number": 413, "usage_type": "call"}, {"api_name": "requests.POST", "line_number": 413, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 414, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 410, "usage_type": "name"}]} +{"seq_id": "311077715", "text": "# InfluxDB\nfrom influxdb import InfluxDBClient\nfrom utils.constants import *\n\n\nclass InfluxDBAccessor(object):\n\n def __init__(self):\n \"\"\"\n Initialize\n \"\"\"\n self.client = InfluxDBClient(host=IN_HOST, port=IN_PORT, database=IN_DB)\n self.vehicles = []\n self.fields = []\n\n def query(self, query):\n \"\"\"\n InfluxDBに対し、指定したクエリを実行する\n :param query:\n :return:\n \"\"\"\n print(\"Query: {0}\".format(query))\n results = self.client.query(query=query)\n print(\"Result: {0}\".format(results))\n point_list = list(results.get_points())\n if len(point_list) > 0:\n return list(results.get_points())\n return None\n\n def get_measurement(self, measurement):\n \"\"\"\n 指定したメジャーメントを取得する\n :param measurement:\n :return:\n \"\"\"\n query = 'select * from \"{}\"'.format(measurement)\n return self.query(query)\n\n def get_fields(self, measurement):\n \"\"\"\n 指定したメジャーメントのフィールドを取得する\n :param measurement:\n :return:\n \"\"\"\n if len(self.fields) == 0:\n # results = self.query(\"select * from /.*/ limit 1\")\n results = self.query(\"select * from \\\"{}\\\" limit 1\".format(measurement))\n if results is not None:\n result = results[0]\n self.fields.extend(result.keys())\n return self.fields\n\n def exists_field(self, measurement, field):\n \"\"\"\n 引数で指定したフィールドが存在するか確認\n :param measurement:\n :param field:\n :return:\n \"\"\"\n fields = self.get_fields(measurement)\n return field in fields\n\n def get_vehicles(self, measurement):\n \"\"\"\n 車両名の一覧を取得する\n :param measurement:\n :return:\n \"\"\"\n if len(self.vehicles) == 0:\n fields = self.get_fields(measurement)\n for field in fields:\n split = field.split(\"_\")\n if len(split) < 2:\n continue\n vehicle = split[0]\n if vehicle not in str(self.vehicles):\n self.vehicles.append(vehicle)\n print(\"Vehicles: {0}\".format(self.vehicles))\n return self.vehicles\n\n def write(self, data_):\n \"\"\"\n データの書き込み\n format:\n data_ = [{'fields': {'metric1': 1.0, 'metric2': -1},\n 'measurement': 'garden_sim',\n 'time': datetime.datetime.utcnow(),\n 'tags': {'cat1': 'aaa'}},\n {'fields': {'metric1': 2.0, 'metric2': -2},\n 'measurement': 'garden_sim',\n 'time': datetime.datetime.utcnow(),\n 'tags': {'cat1': 'aaa'}},\n {'fields': {'metric1': 3.0, 'metric2': -3},\n 'measurement': 'garden_sim',\n 'time': datetime.datetime.utcnow(),\n 'tags': {'cat1': 'aaa'}}]\n :param data_:\n :return:\n \"\"\"\n return self.client.write_points(data_)\n", "sub_path": "Zipc_airflow/src/analyzer/utils/influxdb_accessor.py", "file_name": "influxdb_accessor.py", "file_ext": "py", "file_size_in_byte": 3219, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "influxdb.InfluxDBClient", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "93754415", "text": "#! 
/usr/bin/python\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nimport subprocess as subp\nimport os\nimport timeit\n\nimport scipy\nfrom scipy.integrate import quad, dblquad, tplquad\nfrom numpy import *\nimport scipy.stats\n\nimport argparse\n\n#############################################################\n# #\n# DOCUMENTATION # \n# #\n#############################################################\n\ndescription = \"This script uses the Masterlist (.mls) file to do the morphological analysis: volume, effective radius, ellipse semiaxes (a,b,c).\"\nepilog = \"At the end, the script stores the info in the INFO (.inf) file and some graphs.\"\nparser = argparse.ArgumentParser(description=description, epilog=epilog)\nparser.add_argument('filein', type=str,\n help='Name of the Observed Catalog file, must be stored in the folder OC_path=\"observed_catalogs/\"')\nparser.add_argument('filenumber', type=int,\n help='The consecutive number of the file; this is necessary to generate the Beta-Skeleton file.')\nparser.add_argument('-b', '--beta', type=float,\n default=1.0,\n help='Beta Skeleton Value, a float value \"b>=1\". Default Value = 1.0')\nparser.add_argument('-n', '--nrand', type=float,\n default=1.0,\n help='The ratio between Number of Random Points and Number of Observational Points (nrand= N_random/N_obs)')\narg = parser.parse_args()\n\nBETA = arg.beta\nnrand = arg.nrand\nOC_FILE_IN = arg.filein\nFILENUM = arg.filenumber\n\n \n#############################################################\n#############################################################\n## ##\n## ##\n## Begins the Main Routine ## \n## ##\n## ##\n#############################################################\n#############################################################\n\n\ndef Plot_Control(VAE_catalog, fig_name, n ):\n n = int(n)\n \n FG_format = \"png\"\n ID = VAE_catalog[:,0] # Void ID\n N = ID.shape[0] # Number of Voids in Catalog\n N_part = VAE_catalog[:,1] # Number of Random Particles in Void\n V = VAE_catalog[:,2] # Void Volume \n r = VAE_catalog[:,3] # r_eff\n a = VAE_catalog[:,4] # Major semi-axis\n b = VAE_catalog[:,5] # Intermediate semi-axis\n c = VAE_catalog[:,6] # Minor semi-axis\n\n \n plt.scatter(ID,N_part)\n plt.xlabel(\"Void ID\")\n plt.ylabel(\"Number of Particles\")\n fg_filename = \"n_particles_per_void_{}.{}\".format(fig_name,FG_format) \n plt.savefig(FG_path + fg_filename)\n plt.close()\n \n plt.scatter(ID, r)\n plt.xlabel(\"Void ID\")\n plt.ylabel(\"Radius (Mpc)\")\n fg_filename = \"radius_per_void_{}.{}\".format(fig_name,FG_format)\n plt.savefig(FG_path + fg_filename)\n plt.close()\n \n plt.scatter(N_part,r)\n plt.xlabel(\"Number of Particles\")\n plt.ylabel(\"Radius (Mpc)\")\n fg_filename = \"radius_vs_number_of_particles_{}.{}\".format(fig_name,FG_format)\n plt.savefig(FG_path + fg_filename)\n plt.close()\n\n\ndef PLOT_ANALYSIS(n, ID, N_part, r):\n plt.scatter(ID,N_part)\n plt.xlabel(\"Void ID\")\n plt.ylabel(\"Number of Particles\")\n plt.savefig(fig_path + \"{}_particles per Void.pdf\".format(filein))\n plt.close()\n \n plt.scatter(ID, r)\n plt.xlabel(\"Void ID\")\n plt.ylabel(\"Radius (Mpc)\")\n plt.savefig(fig_path + \"Planck15 Radius per Void.pdf\")\n plt.close()\n \n plt.scatter(N_part,r)\n plt.xlabel(\"Number of Particles\")\n plt.ylabel(\"Radius (Mpc)\")\n plt.savefig(fig_path + \"Planck15 Radius vs Particle Number.pdf\")\n plt.close() \n\n\ndef diff_volume(p,t,r):\n return r**2*sin(p)\n\n\ndef GET_VOLUME(r1, r2, t1, t2, p1, p2):\n \"\"\" \n VOLUME # 
In Mpc**3\n Arguments: Radius (low, high), Theta, Phi\n \"\"\"\n ## limits for radius\n # r1 = 0.\n # r2 = 300.\n ## limits for theta\n t1 = t1 * np.pi / 180 # 0\n t2 = t2 * np.pi / 180 # 2*pi\n # limits for phi\n p1 = (90 - p1 ) * np.pi / 180 # 0\n p2 = (90 - p2 ) * np.pi / 180 # pi\n volume = tplquad(diff_volume, r1, r2, lambda r: t1, lambda r: t2,\n lambda r,t: p1, lambda r,t: p2)[0]\n return volume\n\n\n## To do the prolate/oblate scatter plot.\ndef density_estimation(m1, m2):\n X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j] \n positions = np.vstack([X.ravel(), Y.ravel()]) \n values = np.vstack([m1, m2]) \n kernel = scipy.stats.gaussian_kde(values) \n Z = np.reshape(kernel(positions).T, X.shape)\n return X, Y, Z\n\nFG_format = \"png\" \n\nRC_path = \"random_catalogs/\"\nFC_path = \"full_catalogs/\"\nOC_path = \"observed_catalogs/\"\nBS_path = \"xdl_beta_skeleton/\"\nML_path = \"masterlists/\"\nFG_path = \"figures/\"\nVE_path = \"volume_and_excentricity/\"\nAN_path = \"analysis/\"\n\nOC_filename = OC_FILE_IN\nRC_filename = \"{}.cat\".format(FILENUM)\nFC_filename = \"{}.cat\".format(FILENUM)\nBS_filename = \"{}.bsk\".format(FILENUM)\nML_filename = \"{}.mls\".format(FILENUM)\nFG_filename = \"{}.{}\".format(FILENUM,FG_format)\nVE_filename = \"{}.vae\".format(FILENUM)\nAN_filename = \"{}.info\".format(FILENUM)\n\n\nOC = np.loadtxt( OC_path + OC_filename )\nRC = np.loadtxt( RC_path + RC_filename )\nFC = np.loadtxt( FC_path + FC_filename )\nVE = np.genfromtxt( VE_path + VE_filename, delimiter=', ')[:,:-1]\n\nvolume = GET_VOLUME(0, 300, 0, 90, 45, 0)\n# 0-300 Mpc, 0-90 deg, 0-45 deg\n\nprint(\"Volume = \", volume)\n\nID = VE[:,0]\nN = ID.shape[0]\nN_part = VE[:,1]\nV = VE[:,2]\nr = VE[:,3]\na = VE[:,4]\nb = VE[:,5]\nc = VE[:,6]\n\n\n\n\n###########################################################\n# #\n# Original And Random Catalog Plots #\n# #\n###########################################################\n\nps = 0.1\nal = 0.2\n\nx = OC[:,0]\ny = OC[:,1]\nz = OC[:,2]\n\nfig = plt.figure( figsize=(10,10))\nax1 = fig.add_subplot(223)\nax2 = fig.add_subplot(224)\nax3 = fig.add_subplot(221)\n\nax1.scatter(x,y, s=ps, alpha=al)\nax2.scatter(x,z, s=ps, alpha=al)\nax3.scatter(y,z, s=ps, alpha=al)\n\nax1.set_xlabel('x (Mpc)')\nax1.set_ylabel('y (Mpc)')\nax2.set_xlabel('x (Mpc)')\nax2.set_ylabel('z (Mpc)')\nax3.set_xlabel('y (Mpc)')\nax3.set_ylabel('z (Mpc)')\n\nplt.tight_layout()\n\nplt.savefig(FG_path + \"oc_{}.{}\".format(FILENUM, FG_format))\nplt.close()\n\n\n\nx = RC[:,0]\ny = RC[:,1]\nz = RC[:,2]\n\nfig = plt.figure( figsize=(10,10))\nax1 = fig.add_subplot(223)\nax2 = fig.add_subplot(224)\nax3 = fig.add_subplot(221)\n\nax1.scatter(x,y, s=ps, alpha=al)\nax2.scatter(x,z, s=ps, alpha=al)\nax3.scatter(y,z, s=ps, alpha=al)\n\nax1.set_xlabel('x (Mpc)')\nax1.set_ylabel('y (Mpc)')\nax2.set_xlabel('x (Mpc)')\nax2.set_ylabel('z (Mpc)')\nax3.set_xlabel('y (Mpc)')\nax3.set_ylabel('z (Mpc)')\n\nplt.tight_layout()\n\nplt.savefig(FG_path + \"rc_{}.{}\".format(FILENUM, FG_format))\nplt.close()\n\n\n\n###########################################################\n# #\n# Basic Control Plots #\n# #\n###########################################################\n\nfig = plt.figure( figsize=(8,8))\nax = fig.add_subplot(111)\nax.scatter( ID, N_part)\nax.set_xlim(0,N)\nax.set_ylabel(\"Particles per Void\")\nax.set_xlabel(\"Void ID\")\nplt.tight_layout()\nplt.savefig(FG_path + \"particles_per_void_{}.{}\".format(FILENUM, FG_format))\n\nax.set_yscale(\"log\")\nplt.tight_layout()\nplt.savefig(FG_path + 
\"particles_per_void_log_{}.{}\".format(FILENUM, FG_format))\nplt.close()\n\n\nfig = plt.figure( figsize=(8,8))\nax = fig.add_subplot(111)\nax.hist(r)\nax.set_ylabel(\"Counts\")\nax.set_xlabel(\"r (Mpc)\")\nplt.tight_layout()\nplt.savefig(FG_path + \"r_eff_histogram_{}.{}\".format(FILENUM, FG_format))\n\nax.set_yscale(\"log\")\nplt.tight_layout()\nplt.savefig(FG_path + \"r_eff_histogram_log_{}.{}\".format(FILENUM, FG_format))\nplt.close()\n\n\nfig = plt.figure( figsize=(8,8))\nax = fig.add_subplot(111)\nax.scatter( N_part, r)\nax.set_ylabel(\"R_eff (Mpc)\")\nax.set_xlabel(\"Number of Particles\")\nplt.tight_layout()\nplt.savefig(FG_path + \"r_eff_vs_particles_{}.{}\".format(FILENUM, FG_format))\n\nax.set_yscale(\"log\")\nax.set_xscale('log')\nplt.tight_layout()\nplt.savefig(FG_path + \"r_eff_vs_particles_log_{}.{}\".format(FILENUM, FG_format))\nplt.close()\n\n\n\n\n###########################################################\n# #\n# Excentricity Function #\n# #\n###########################################################\n\nx = 1-c/a\n\nfig = plt.figure(figsize=(4,4))\nY, Bins, stuff = plt.hist(x, bins=20, normed=True, histtype=\"step\")\nplt.ylabel(r\"$f(N)dN$\")\nplt.xlabel(r\"$\\epsilon = 1 - c/a$\")\nplt.xlim(0,1)\nplt.ylim(0,4)\nplt.tight_layout()\nplt.savefig(FG_path + \"void_ellipticity_{}.{}\".format(FILENUM, FG_format))\nplt.close()\n\nX = (Bins[:-1] + Bins[1:])/2\ndata = vstack([ X, Y]).T\nnp.savetxt(AN_path + \"excentricity_\" + AN_filename, data)\n\n###########################################################\n# #\n# Prolate / Oblate #\n# #\n###########################################################\n\nm1, m2 = b/a, c/b\nxmin, xmax = 0, 1\nymin, ymax = 0, 1\n\nX, Y, Z = density_estimation(m1, m2)\n\nfig = plt.figure(figsize=(4,4))\nax = fig.add_subplot(111) \n# Show density \nax.imshow(np.rot90(Z), cmap=plt.cm.terrain_r, extent=[xmin, xmax, ymin, ymax])\n# Add contour lines\nplt.contour(X, Y, Z, cmap=\"terrain\") \nax.plot(m1, m2, 'k.', markersize=2) \nax.set_xlim([xmin, xmax]) \nax.set_ylim([ymin, ymax]) \n\nunitary = np.linspace(0,1)\nax.plot(unitary, unitary, c=\"k\")\nax.text(0.9, 0.1, \"Oblate\", horizontalalignment=\"right\", verticalalignment=\"center\" )\nax.text(0.1, 0.9, \"Prolate\", horizontalalignment=\"left\", verticalalignment=\"center\" )\n\nplt.xlabel(\"b / a\")\nplt.ylabel(\"c / b\")\nplt.tight_layout()\nplt.savefig(FG_path + \"void_two_axis_ratios_{}.{}\".format(FILENUM, FG_format))\nplt.close()\n\ndata = vstack([m1,m2]).T\nnp.savetxt(AN_path + \"prolate_oblate_\" + AN_filename, data)\n\n\n###########################################################\n# #\n# Volume Density Function #\n# #\n###########################################################\n\n# Observational data\nron_64 = np.loadtxt(\"data/ronconi_2019_z0_b64.dat\" , delimiter=\"\\t\")\nron_128 = np.loadtxt(\"data/ronconi_2019_z0_b128.dat\", delimiter=\"\\t\")\nron_256 = np.loadtxt(\"data/ronconi_2019_z0_b256.dat\", delimiter=\"\\t\")\nron_500 = np.loadtxt(\"data/ronconi_2019_z0_b500.dat\", delimiter=\"\\t\")\nade_PDF = np.loadtxt(\"data/adermann_2018_PDF.dat\")\n\nh = 0.6774\nV_h = volume * (h**3)\n\nBins = np.linspace(0.1,1.2, 20)\ndeltaBins = Bins[1]-Bins[0]\n\ny, Bins = np.histogram( np.log10(r*h), bins=Bins, normed=False )\nx = (Bins[:-1] + Bins[1:])/2\n\n# Plot Volume Density Function\n#\nfig = plt.figure(figsize=[8,6])\nfs = 20\nplt.title(\"Using Uniform-Random Points\", fontsize=fs)\n# Adermann data\nplt.plot(( ade_PDF[:,0] * ( 3 / (4 * pi)) ) ** (1 / 3.0), ade_PDF[:,1] , label=\"Adermann et 
al.\")\n# Ronconi Data\nplt.plot( ron_64[:,0], ron_64[:,1], label=\"Ronconi BZ 64\")\nplt.plot(ron_128[:,0], ron_128[:,1], label=\"Ronconi BZ 128\")\nplt.plot(ron_256[:,0], ron_256[:,1], label=\"Ronconi BZ 256\")\nplt.plot(ron_500[:,0], ron_500[:,1], label=\"Ronconi BZ 500\")\n# This Work!\nplt.scatter(10**x, y/( deltaBins * V_h), label=\"This Work - SDSS-Planck15\")\n\n# Labels, legends, scale.\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.ylim(0.00000001,0.1)\nplt.xlim(0.4,30)\nplt.xlabel(r\"$r_{eff} \\mathrm{ [Mpc/h]}$\", fontsize=fs)\nplt.ylabel(r\"$ dn / d \\ln r (\\mathrm{ [h/Mpc]}^3)$\", fontsize=fs)\nplt.legend(loc=3, fontsize=int(fs*0.7))\nplt.savefig(FG_path + \"volume_density_function_{}.{}\".format(FILENUM, FG_format))\nplt.close()\n\n\n## Store histogram data.\ndata = vstack([x,y / (deltaBins * V_h)]).T\nnp.savetxt(AN_path + \"volume_pdf_\" + AN_filename, data)\n\n\n###########################################################\n# #\n# Galaxy/Halo Density #\n# #\n###########################################################\n\nR_xyz = (OC[:,0]**2 + OC[:,1]**2 + OC[:,2]**2 )**0.5\n\nNBINS = 20\nY, BINS = np.histogram(R_xyz, bins = NBINS)\nX = []\nBINVOLUME = []\nfor i in range(len(BINS)-1):\n X.append( (BINS[i+1] + BINS[i]) / 2)\n\nX = np.array(X)\nY = np.array(Y)\n\nBINVOLUME = []\nfor i in range(len(BINS)-1):\n r1 = BINS[i]\n r2 = BINS[i+1]\n # limits for theta\n\n ## limits for theta\n t1 = 0 * np.pi / 180 # 0\n t2 = 90 * np.pi / 180 # 2*pi\n # limits for phi\n p1 = (90 - 45 ) * np.pi / 180 # 0\n p2 = (90 - 0 ) * np.pi / 180 # pi\n \n \n def diff_volume(p,t,r):\n return r**2*sin(p)\n\n BinVolume = np.abs(tplquad(diff_volume, r1, r2, lambda r: t1, lambda r: t2,\n lambda r,t: p1, lambda r,t: p2)[0])\n print(BinVolume)\n BINVOLUME.append(BinVolume)\n \nBINVOLUME = np.array(BINVOLUME)\n\nfig = plt.figure()\nplt.scatter(X, Y / BINVOLUME)\nplt.yscale(\"log\")\nplt.ylim(0.001,1)\nplt.xlabel(\"R (Mpc)\")\nplt.ylabel(\"dN / dV\")\nplt.title(\"Random Points Density Number (per shell)\")\nplt.savefig(FG_path + \"density_per_shell_{}.{}\".format(FILENUM, FG_format))\n", "sub_path": "25_vf2.0_varying_beta_and_nrand/morphological_analysis.py", "file_name": "morphological_analysis.py", "file_ext": "py", "file_size_in_byte": 14427, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.use", "line_number": 4, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 78, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 108, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 126, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 129, "usage_type": "attribute"}, {"api_name": "scipy.integrate.tplquad", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.mgrid", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 139, "usage_type": "call"}, {"api_name": "scipy.stats.gaussian_kde", "line_number": 140, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 140, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 245, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 245, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 268, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 276, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 290, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 290, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 291, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 310, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 312, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 312, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 313, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 314, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 314, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 315, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 315, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 316, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}, {"api_name": "numpy.savetxt", "line_number": 322, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 336, "usage_type": "name"}, {"api_name": "numpy.rot90", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 339, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 339, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contour", "line_number": 341, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 341, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 346, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 351, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 351, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 352, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 352, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 353, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 353, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 354, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 354, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 355, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 355, "usage_type": "name"}, {"api_name": "numpy.savetxt", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 380, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 380, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 387, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 387, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 389, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 389, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 391, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 391, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 392, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 392, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 393, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 393, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 394, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 394, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 396, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 396, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 399, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 399, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.yscale", "line_number": 400, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 400, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 401, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 401, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 402, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 402, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 403, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 403, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 404, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 404, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 405, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 405, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 406, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 406, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 407, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 407, "usage_type": "name"}, {"api_name": "numpy.savetxt", "line_number": 412, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 430, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 440, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 441, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 443, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 444, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 450, "usage_type": "call"}, {"api_name": "scipy.integrate.tplquad", "line_number": 450, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 455, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 457, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 457, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 458, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 458, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 459, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 459, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 460, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 460, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 461, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 461, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 462, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 462, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 463, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 463, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 464, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 464, "usage_type": "name"}]} +{"seq_id": "64318020", "text": "'''\nCalculates slice maps with pymses\nRebekka Bieri, Sam Geen 2018\n'''\n\n# Get all the startup modules for the project\nfrom startup import 
*\n\n# Various extra pymses visualisation imports\nfrom pymses.utils import constants as C\nimport pymses.analysis.visualization as v\nscop = v.ScalarOperator\n\nfrom pymses.sources.ramses.output import *\nfrom pymses.analysis.visualization import *\n\nimport skimage.transform\n\nimport stellars\n\n# Axes up and across for each line of sight\nups = {'x':'z','y':'x','z':'y'}\nacrosses = {'x':'y','y':'z','z':'x'}\nlostoi = {\"x\":0, \"y\":1, \"z\":2}\n\nIMSIZE = 1024\n\nclass MaxTempOperator(Operator):\n def __init__(self, ro):\n self._unit = ro.info[\"unit_temperature\"].express(C.K)\n def Tfunc(dset):\n mufunc = lambda dset: 1./(0.76*(1.+dset[\"xHII\"]) + \\\n 0.25*0.24*(1.+dset[\"xHeII\"]+2.*dset[\"xHeIII\"]))\n T = dset[\"P\"]/dset[\"rho\"]*self._unit*mufunc(dset)\n return T\n d = {\"T\": Tfunc}\n Operator.__init__(self, d, is_max_alos=True)\n\n def operation(self, int_dict):\n mapT = int_dict.values()[0]\n return mapT\n\ndef pymses_func(ro, hydro):\n if hydro == \"rho\":\n unit = ro.info[\"unit_density\"].express(C.H_cc)\n return scop(lambda dset: dset[\"rho\"]*unit)\n if hydro == \"P\":\n unit = ro.info[\"unit_pressure\"].express(C.barye)\n return scop(lambda dset: dset[\"P\"]*unit)\n if hydro == \"T\":\n mufunc = lambda dset: 1./(0.76*(1.+dset[\"xHII\"]) + \\\n 0.25*0.24*(1.+dset[\"xHeII\"]+2.*dset[\"xHeIII\"]))\n unit = ro.info[\"unit_temperature\"].express(C.K)\n return scop(lambda dset: dset[\"P\"]/dset[\"rho\"]*unit*mufunc(dset))\n if \"xH\" in hydro:\n unit = 1.0\n return scop(lambda dset: dset[hydro]*unit)\n if hydro == \"Bmag\":\n def bmagfunc(dset):\n b = 0.5*(dset[\"B-left\"]+dset[\"B-right\"])\n # Magnitude of the 3-vector for each cell\n return np.sqrt((b**2).sum(axis=1))\n return scop(bmagfunc)\n if hydro == \"IRflux\":\n #def irfluxfunc(dset):\n # print 'in HERE'\n # exit()\n # TIR_Trap_op = dset[\"Pnontherm\"]\n # return TIR_Trap_op\n print('in HERE [1]')\n #exit()\n return scop(lambda dset: dset[\"Pnontherm\"])\n\n\n # None of those? 
Return unitless\n    sco = scop(lambda dset: dset[hydro])\n    return sco\n\ndef hydro_range(hydro):\n    if hydro == \"rho\":\n        return (0,8)\n    if hydro == \"P\":\n        return (None, None) # No idea\n    if hydro == \"T\":\n        return (0,5)\n    if \"xH\" in hydro:\n        return (-6,0)\n    if hydro == \"gpe\":\n        return (None, None)\n    if hydro == \"Bmag\":\n        return (None, None)\n    return (None,None)\n\ndef hydro_label(hydro):\n    if hydro == \"rho\":\n        return \"Density / atoms/cm$^{3}$\"\n    if hydro == \"P\":\n        return \"Pressure / dyne\"\n    if hydro == \"T\":\n        return \"Temperature / K\"\n    if \"xH\" in hydro:\n        return \"Ionisation Fraction \"+hydro\n    if hydro == \"gpe\":\n        return \"Gravitational Potential Energy\"\n    if hydro == \"Bmag\":\n        return \"|B| (code units)\"\n    return hydro\n\n\ndef _MapSlice(snap,hydro='rho',los='z',zoom=1.0,starC=False):\n    amr = hydrofuncs.amr_source(snap,hydro)\n\n    try:\n        snap = snap.RawData()\n    except:\n        pass\n\n    centre = np.zeros(3)+0.5\n    boxlen = snap.info[\"boxlen\"]\n    levelmax = snap.info[\"levelmax\"]\n    dx = 2.0**(-levelmax)\n    up = ups[los]\n    across = acrosses[los]\n\n    # Centre the image on the first star to form?\n    if starC:\n        stars = stellars.FindStellar(snap)\n        centre[lostoi[across]] = np.array([stars.x[0], stars.y[0], stars.z[0]])[lostoi[across]]/boxlen\n        centre[lostoi[up]] = np.array([stars.x[0], stars.y[0], stars.z[0]])[lostoi[up]]/boxlen\n        centre[lostoi[los]] = np.array([stars.x[0], stars.y[0], stars.z[0]])[lostoi[los]]/boxlen\n\n    size = np.zeros(2)+zoom\n\n    def makeslice(snap,hydro):\n        hydro_op = scop(hydrofuncs.scale_by_units(snap,hydro))\n        slc = pymses.analysis.visualization.SliceMap(amr, cam, hydro_op, z=0.0)\n        print(\"Made slice (min/max:\", slc.min(), slc.max(), \")\")\n        return slc\n\n    # Parenthesise the test so that vdispersion maps also take the else branch\n    if not (\"vorticity\" in hydro or \"vdispersion\" in hydro):\n        cam = v.Camera(center=centre, line_of_sight_axis=los, \n                   region_size=size, up_vector=up, \n                   map_max_size=IMSIZE, log_sensitive=True)\n\n        slc = makeslice(snap,hydro)\n    else:\n        # MAKE VORTICITY MAPS YES THIS IS HORRIBLE\n        # Get pixel size\n        NEWIMSIZE = IMSIZE\n        # Step across multiple pixels / cells? (integer division keeps the map size an int)\n        if \"2px\" in hydro:\n            NEWIMSIZE = IMSIZE // 2\n        if \"4px\" in hydro:\n            NEWIMSIZE = IMSIZE // 4\n        #dxcam = zoom / float(NEWIMSIZE) * 0.5 # Undersample to prevent cell skipping effects\n        dxcam = dx * float(IMSIZE) / float(NEWIMSIZE)\n        #centre = centre+0.5*dxcam\n        # Make camera again in case\n        cam = v.Camera(center=centre, line_of_sight_axis=los, \n                   region_size=size, up_vector=up, \n                   map_max_size=NEWIMSIZE, log_sensitive=True)\n        # dx in km (to match velocity units)\n        # We pre-divide every slice map by this to make calculations easier later\n        dxphys = dxcam * boxlen * pcincm\n        if \"vorticity\" in hydro:\n            # Get xyz in frame of image (ensure right-handed coordinate system)\n            # We need this because we calculate d/dx etc in frame of image\n            vx0 = makeslice(snap,\"v\"+across)\n            vy0 = makeslice(snap,\"v\"+up)\n            vz0 = makeslice(snap,\"v\"+los)\n            # Make new slice + dx\n            cx = centre+0.0\n            cx[lostoi[across]] += dxcam\n            cam = v.Camera(center=cx, line_of_sight_axis=los, \n                   region_size=size, up_vector=up, \n                   map_max_size=NEWIMSIZE, log_sensitive=True)\n            vxx = makeslice(snap,\"v\"+across)\n            vyx = makeslice(snap,\"v\"+up)\n            vzx = makeslice(snap,\"v\"+los)\n            # Make new slice + dy\n            cy = centre+0.0\n            cy[lostoi[up]] += dxcam\n            cam = v.Camera(center=cy, line_of_sight_axis=los, \n                   region_size=size, up_vector=up, \n                   map_max_size=NEWIMSIZE, log_sensitive=True)\n            vxy = makeslice(snap,\"v\"+across)\n            vyy = makeslice(snap,\"v\"+up)\n            vzy = makeslice(snap,\"v\"+los)\n            # Make new slice + dz\n            cz = centre+0.0\n            # HACK TEST\n            cz[lostoi[los]] += dxcam\n            cam = v.Camera(center=cz, line_of_sight_axis=los, \n                   region_size=size, up_vector=up, \n                   map_max_size=NEWIMSIZE, log_sensitive=True)\n            vxz = makeslice(snap,\"v\"+across)\n            vyz = makeslice(snap,\"v\"+up)\n            vzz = makeslice(snap,\"v\"+los)\n            # Get vorticity components in s^{-1}\n            # x1000 to convert from km/s to cgs\n            vortx = ((vzy - vz0) - (vyz - vy0)) / dxphys * 1000.0\n            vorty = ((vxz - vx0) - (vzx - vz0)) / dxphys * 1000.0\n            vortz = ((vyx - vy0) - (vxy - vx0)) / dxphys * 1000.0\n            # N = 4 here for the mean and velocity dispersion\n            vxmean = (vx0 + vxx + vxy + vxz) / 4.0\n            vymean = (vy0 + vyx + vyy + vyz) / 4.0\n            vzmean = (vz0 + vzx + vzy + vzz) / 4.0\n            # Speed in cgs from km/s\n            spd = np.sqrt(vx0**2 + vy0**2 + vz0**2) * 1000.0\n            # Make vorticity map: find magnitude in Myr^{-1}\n            slc = np.sqrt(vortx**2 + vorty**2 + vortz**2) * Myrins\n            # Find turnover timescale?\n            if \"timescale\" in hydro:\n                slc = 1.0 / slc\n            # Compare the eddy turnover speed (dxphys / curl V) to the bulk gas speed\n            if \"speedcompare\" in hydro:\n                slc = dxphys * slc / Myrins / spd\n        # Make velocity dispersion\n        if \"vdispersion\" in hydro:\n            # Camera plane slice\n            vx0 = makeslice(snap,\"v\"+across)\n            vy0 = makeslice(snap,\"v\"+up)\n            vz0 = makeslice(snap,\"v\"+los)\n            # +los slice\n            cplus = centre+0.0\n            cplus[lostoi[los]] += dxcam\n            cam = v.Camera(center=cplus, line_of_sight_axis=los, \n                   region_size=size, up_vector=up, \n                   map_max_size=NEWIMSIZE, log_sensitive=True)\n            vxp = makeslice(snap,\"v\"+across)\n            vyp = makeslice(snap,\"v\"+up)\n            vzp = makeslice(snap,\"v\"+los)\n            # -los slice\n            cminus = centre+0.0\n            cminus[lostoi[los]] -= dxcam\n            cam = v.Camera(center=cminus, line_of_sight_axis=los, \n                   region_size=size, up_vector=up, \n                   map_max_size=NEWIMSIZE, log_sensitive=True)\n            vxm = makeslice(snap,\"v\"+across)\n            vym = makeslice(snap,\"v\"+up)\n            vzm = makeslice(snap,\"v\"+los)\n            # Make thin lasagna of 3 slices around the middle image\n            # Each grid will have shape (3, NEWIMSIZE, NEWIMSIZE)\n            vxgrid = np.array([vxm,vx0,vxp])\n            vygrid = np.array([vym,vy0,vyp])\n            vzgrid = np.array([vzm,vz0,vzp])\n            # Make statistics\n            vxmean = (vx0 + vxp + vxm) / 3.0\n            vymean = (vy0 + vyp + vym) / 3.0\n            vzmean = (vz0 + vzp + vzm) / 3.0\n            nim = NEWIMSIZE\n            vdisp = np.zeros((nim,nim))\n            nsamples = 27 # 3x3x3 around each pixel\n            # i1 is for the 3-deep slice lasagna\n            for i1 in [0,1,2]:\n                # i2 is the slice x axis\n                for i2 in [-1,0,1]:\n                    # i3 is the slice y axis\n                    for i3 in [-1,0,1]:\n                        smid = (slice(1,nim-1),slice(1,nim-1))\n                        sgrid = (i1,slice(1+i2,nim-1+i2),slice(1+i3,nim-1+i3))\n                        vdisp[smid] += (vxgrid[sgrid] - vxmean[smid])**2\n                        vdisp[smid] += (vygrid[sgrid] - vymean[smid])**2\n                        vdisp[smid] += (vzgrid[sgrid] - vzmean[smid])**2\n            # Note: this is the 3D velocity dispersion\n            vdisp = np.sqrt(vdisp / float(nsamples))\n            # Bulk speed\n            spd = np.sqrt(vx0**2 + vy0**2 + vz0**2)\n            # Make images\n            slc = vdisp +0.0\n            if \"speedcompare\" in hydro:\n                slc = vdisp / spd\n        # Resize the output image if needed\n        if NEWIMSIZE != IMSIZE:\n            slc = skimage.transform.resize(slc, (IMSIZE, IMSIZE))\n    return centre[lostoi[across]], centre[lostoi[up]], slc\n\n_MapSliceHamu = Hamu.Algorithm(_MapSlice)\n#_MapSliceHamu._force_replace_cache = 
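The vorticity branch of _MapSlice above reconstructs curl(v) by differencing slice maps taken one camera pixel apart along each axis. A toy numpy version of the same one-sided stencil, assuming the nine velocity slices already exist as 2D arrays (here v<comp><axis> means the <comp> velocity component sampled on the slice shifted along <axis>; the helper name is illustrative):

    import numpy as np

    def curl_magnitude(vx0, vy0, vz0, vyx, vzx, vxy, vzy, vxz, vyz, dx):
        # One-sided finite differences between the base slice (v*0) and the
        # shifted slices, mirroring the vortx/vorty/vortz lines in _MapSlice.
        vortx = ((vzy - vz0) - (vyz - vy0)) / dx
        vorty = ((vxz - vx0) - (vzx - vz0)) / dx
        vortz = ((vyx - vy0) - (vxy - vx0)) / dx
        return np.sqrt(vortx**2 + vorty**2 + vortz**2)

    # e.g. with random 4x4 slices and unit pixel spacing:
    slabs = [np.random.rand(4, 4) for _ in range(9)]
    print(curl_magnitude(*slabs, dx=1.0).shape)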
True\n\nclass SliceMap(object):\n '''\n Slice map \n '''\n def __init__(self,snap,hydro,los='z',pixlength=None,zoom=1.0,starC=False):\n '''\n los - Line of sight (strings 'x','y','z')\n pixlength - Length of pixels in parsecs\n zoom - Factor to zoom (<1 = zoom, 1 = full box)\n '''\n self._snap = snap.RawData()\n self._los = los\n self._hydro = hydro\n self._zoom = zoom\n self._starC = starC\n self._cx = None\n self._cy = None\n if pixlength is None:\n # NOTE: boxlen should be in pc!!\n #pixlength = snap.info[\"boxlen\"] * zoom / float(IMSIZE)\n pixlength = self._snap.info[\"boxlen\"] / float(IMSIZE)\n self._pixlength = pixlength\n self._slice = None\n\n def getSliceMap(self):\n #if \"vdispersion\" in self._hydro or \"vorticity\" in self._hydro:\n #Hamu.GLOBALFORCEREPLACECACHE = True\n if self._slice is None:\n self._cx, self._cy, self._slice = _MapSliceHamu(self._snap.hamusnap,self._hydro,self._los,self._zoom, self._starC)\n #Hamu.GLOBALFORCEREPLACECACHE = False\n return self._cx, self._cy, self._slice\n", "sub_path": "CDWinds/scripts/sliceMap.py", "file_name": "sliceMap.py", "file_ext": "py", "file_size_in_byte": 12314, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pymses.analysis.visualization.ScalarOperator", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pymses.analysis.visualization", "line_number": 12, "usage_type": "name"}, {"api_name": "pymses.utils.constants.K", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pymses.utils.constants", "line_number": 30, "usage_type": "name"}, {"api_name": "pymses.utils.constants.H_cc", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pymses.utils.constants", "line_number": 45, "usage_type": "name"}, {"api_name": "pymses.utils.constants.barye", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pymses.utils.constants", "line_number": 48, "usage_type": "name"}, {"api_name": "pymses.utils.constants.K", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pymses.utils.constants", "line_number": 53, "usage_type": "name"}, {"api_name": "stellars.FindStellar", "line_number": 126, "usage_type": "call"}, {"api_name": "pymses.utils.analysis.visualization.SliceMap", "line_number": 136, "usage_type": "call"}, {"api_name": "pymses.utils.analysis", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pymses.utils", "line_number": 136, "usage_type": "name"}, {"api_name": "pymses.analysis.visualization.Camera", "line_number": 142, "usage_type": "call"}, {"api_name": "pymses.analysis.visualization", "line_number": 142, "usage_type": "name"}, {"api_name": "pymses.analysis.visualization.Camera", "line_number": 160, "usage_type": "call"}, {"api_name": "pymses.analysis.visualization", "line_number": 160, "usage_type": "name"}, {"api_name": "pymses.analysis.visualization.Camera", "line_number": 175, "usage_type": "call"}, {"api_name": "pymses.analysis.visualization", "line_number": 175, "usage_type": "name"}, {"api_name": "pymses.analysis.visualization.Camera", "line_number": 184, "usage_type": "call"}, {"api_name": "pymses.analysis.visualization", "line_number": 184, "usage_type": "name"}, {"api_name": "pymses.analysis.visualization.Camera", "line_number": 194, "usage_type": "call"}, {"api_name": "pymses.analysis.visualization", "line_number": 194, "usage_type": "name"}, {"api_name": "pymses.analysis.visualization.Camera", "line_number": 234, "usage_type": "call"}, {"api_name": "pymses.analysis.visualization", "line_number": 
234, "usage_type": "name"}, {"api_name": "pymses.analysis.visualization.Camera", "line_number": 243, "usage_type": "call"}, {"api_name": "pymses.analysis.visualization", "line_number": 243, "usage_type": "name"}, {"api_name": "skimage.transform.transform.resize", "line_number": 280, "usage_type": "call"}, {"api_name": "skimage.transform.transform", "line_number": 280, "usage_type": "attribute"}, {"api_name": "skimage.transform", "line_number": 280, "usage_type": "name"}]} +{"seq_id": "611098297", "text": "import json\nimport datetime\nimport random\nimport pprint\n\nclass Animal:\n\n def __init__(self, name, nb, aliment, nb_pattes, food):\n self.name = name\n self.nb = nb\n self.aliment = aliment\n self.nb_pattes = nb_pattes\n self.food = food\n\n def __str__(self):\n return \"un animal qui est un(e) \" + self.name + \" il possède \" + str(self.nb) + \" Jambes, il est de type \" + self.aliment + \", il a \" + str(self.nb_pattes) + \" pattes et mange environ \" + str(self.food) + \" grammes de nourriture\"\n\n def __add__(self, animal, type, birth_year):\n\n parsed_json = (json.loads(json_data))\n print(json.dumps(parsed_json, indent=4, sort_keys=True))\n\nwith open('farm.json') as json_file:\n data = json.load(json_file)\n for p in data['animal list']:\n\n print('type : ' + p['type'])\n\nif __name__ == '__main__':\n\n my_farm = [\"farmville\", \"farmcity\", \"farmland\"]\n\nfor x in range(len(my_farm)):\n print (my_farm[x])\n\n#rand()\n", "sub_path": "Ex03.py", "file_name": "Ex03.py", "file_ext": "py", "file_size_in_byte": 978, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "json.load", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "346371462", "text": "#!/usr/bin/env python\n\nimport cgi\nimport os\nimport xml.etree.cElementTree as etree\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext import webapp\nfrom google.appengine.api import memcache\nfrom google.appengine.api import urlfetch\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nimport facebook\nfrom google.appengine.api import users\nimport webapp2\n\nfrom model import *\n\n\n## API Keys go here!\n_FbApiKey = '284783798323209'\n_FbSecret = '488d93b118272ac03038445c1f4c3c15'\n\nclass MainPage(webapp.RequestHandler):\n def get(self):\n \n products = []\n errors = []\n \n ## instantiate the Facebook API wrapper with your FB App's keys\n fb = facebook.Facebook(_FbApiKey, _FbSecret)\n \n ## check that the user is logged into FB and has added the app\n ## otherwise redirect to where the user can login and install\n if fb.check_session(self.request) and fb.added:\n pass\n '''\n else:\n url = fb.get_add_url()\n self.response.out.write('')\n return\n '''\n path = os.path.join(os.path.dirname(__file__), 'main.html')\n self.response.out.write(template.render(path, {}))\n\n\n # read in content from input box\n # input must be of form:\n #\n # college\n # first last\n # item\n\n content = self.request.get(\"content\").split()\n \n # parse input\n if len(content) != 4:\n self.response.out.write(\"Invalid entry\")\n\n else:\n collegeName = content[0]\n userFirstName = content[1]\n userLastName = content[2]\n itemName = content[3]\n\n # create db entries, store them if they're new\n\n # TODO:\n # set up appropriate hierarchy:\n # user parent should be college, item parent should\n # be 
user.\n\n        college = Network(name = collegeName)\n        if college.exists() is None:\n            Network.put(college)\n        else:\n            college = college.exists()\n\n        user = User(firstName = userFirstName,\n                    lastName = userLastName)\n        if not user.exists():\n            User.put(user)\n\n        item = Item(name = itemName)\n        if not item.exists():\n            Item.put(item)\n    def post(self):\n        self.get()\n\n\n\nclass FeedMe(webapp.RequestHandler):\n    \"\"\"\n    \"\"\"\n    def get(self):\n        self.response.write('You wrote:')\n        # self.response.write(cgi.escape(self.request.get('content')))\n        self.response.write(cgi.escape(self.request.path))\n        self.response.write('
')\n\n\napplication = webapp.WSGIApplication(\n [('/', MainPage),\n ('/stuff.*', FeedMe),],\n debug=True)\n\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3084, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 22, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 22, "usage_type": "name"}, {"api_name": "facebook.Facebook", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 41, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 42, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 42, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 90, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 90, "usage_type": "name"}, {"api_name": "cgi.escape", "line_number": 96, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.WSGIApplication", "line_number": 100, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp", "line_number": 100, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.run_wsgi_app", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "547252375", "text": "import signal, os\nfrom logging import info\nimport gobject\n\nimport rox\nfrom rox import g, basedir\nimport constants\nimport children\n\nstocks = ['rox-halt', 'rox-suspend']\n\no_halt = rox.options.Option('halt_command', 'halt')\no_reboot = rox.options.Option('reboot_command', 'reboot')\no_suspend = rox.options.Option('suspend_command', 'xset dpms force off')\n\ndef init():\n\tfactory = g.IconFactory()\n\tfor name in stocks:\n\t\tpath = os.path.join(rox.app_dir, \"images\", name + \".png\")\n\t\tinfo(\"Loading image %s\", path)\n\t\tpixbuf = g.gdk.pixbuf_new_from_file(path)\n\n\t\tiset = g.IconSet(pixbuf)\n\t\tfactory.add(name, iset)\n\tfactory.add_default()\n\n_logged_in = False\n\ndef may_run_login_script():\n\t\"\"\"Called once the WM is running.\"\"\"\n\tglobal _logged_in\n\tglobal login_child\n\n\tif _logged_in:\n\t\treturn\n\n\t_logged_in = True\n\n\t# Run ROX-Filer\n\trun_rox_process()\n\n\t# Run Login script\n\n\tlogin = basedir.load_first_config(constants.site, 'ROX-Session', 'Login') or \\\n\t\tos.path.join(rox.app_dir, 'Login')\n\n\tlogin_child = os.spawnlp(os.P_NOWAIT, login, login)\n\n\tdef login_died(status):\n\t\tglobal login_child\n\t\tlogin_child = None\n\t\tif status != 0:\n\t\t\trox.alert(_(\"Your login script ('%s') failed. \"\n\t\t\t\t\"I'll give you an xterm to try and fix it. 
ROX-Session \"\n\t\t\t\t\"itself is running fine though - run me a second time \"\n\t\t\t\t\"to logout.\"))\n\t\t\tos.spawnlp(os.P_NOWAIT, 'xterm', 'xterm')\n\n\tchildren.register_child(login_child, login_died)\n\ndef run_rox_process():\n\tglobal rox_pid\n\trun_rox = basedir.load_first_config(constants.site, 'ROX-Session', 'RunROX') or \\\n\t\tos.path.join(rox.app_dir, 'RunROX')\n\ttry:\n\t\trox_pid = os.spawnlp(os.P_NOWAIT, run_rox, run_rox, rox.app_dir)\n\t\tchildren.register_child(rox_pid, rox_process_died)\n\texcept:\n\t\trox.report_exception()\n\t\trox_process_died(0)\n\ndef rox_process_died(status):\n\tglobal rox_pid\n\trox_pid = None\n\n\tbox = g.MessageDialog(parent = None, flags = 0, type = g.MESSAGE_QUESTION,\n\t\t\tbuttons = 0,\n\t\t\tmessage_format = _(\"ROX-Filer has terminated (crashed?).\"\n\t\t\t\t\t \"You should probably try to restart it.\"))\n\n\tfor stock, label, response in [\n\t\t\t(g.STOCK_NO, _(\"Do nothing\"), 0),\n (g.STOCK_EXECUTE, _(\"Run Xterm\"), 1),\n (g.STOCK_REFRESH, _(\"_Restart\"), 2)]:\n\t\tbutton = rox.ButtonMixed(stock, label)\n\t\tbutton.set_flags(g.CAN_DEFAULT)\n\t\tbox.add_action_widget(button, response)\n\t\tbutton.show()\n\t\n\tbox.set_default_response(2)\n\n\tr = box.run()\n\tbox.destroy()\n\n\tif r == 2:\n\t\trun_rox_process()\n\telif r == 1:\n\t\tos.spawnlp(os.P_NOWAIT, 'xterm', 'xterm')\n", "sub_path": "session.py", "file_name": "session.py", "file_ext": "py", "file_size_in_byte": 2494, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "rox.options.Option", "line_number": 12, "usage_type": "call"}, {"api_name": "rox.options", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rox.options.Option", "line_number": 13, "usage_type": "call"}, {"api_name": "rox.options", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rox.options.Option", "line_number": 14, "usage_type": "call"}, {"api_name": "rox.options", "line_number": 14, "usage_type": "attribute"}, {"api_name": "rox.g.IconFactory", "line_number": 17, "usage_type": "call"}, {"api_name": "rox.g", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "rox.app_dir", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 20, "usage_type": "call"}, {"api_name": "rox.g.gdk.pixbuf_new_from_file", "line_number": 21, "usage_type": "call"}, {"api_name": "rox.g.gdk", "line_number": 21, "usage_type": "attribute"}, {"api_name": "rox.g", "line_number": 21, "usage_type": "name"}, {"api_name": "rox.g.IconSet", "line_number": 23, "usage_type": "call"}, {"api_name": "rox.g", "line_number": 23, "usage_type": "name"}, {"api_name": "rox.basedir.load_first_config", "line_number": 44, "usage_type": "call"}, {"api_name": "rox.basedir", "line_number": 44, "usage_type": "name"}, {"api_name": "constants.site", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "rox.app_dir", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.spawnlp", "line_number": 47, "usage_type": "call"}, {"api_name": "os.P_NOWAIT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "rox.alert", "line_number": 53, "usage_type": "call"}, {"api_name": "os.spawnlp", "line_number": 57, "usage_type": "call"}, 
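run_rox_process in the session.py record above pairs os.spawnlp(os.P_NOWAIT, ...) with a death callback via children.register_child, whose internals are not shown here. A self-contained, POSIX-only sketch of that spawn-and-watch pattern using just the standard library (with a blocking os.waitpid where the real code would poll asynchronously):

    import os

    def spawn_and_watch(command, on_exit):
        # Launch without blocking, then reap the child and report its status.
        pid = os.spawnlp(os.P_NOWAIT, command, command)
        _, status = os.waitpid(pid, 0)  # simplification: blocking wait
        on_exit(os.WEXITSTATUS(status) if os.WIFEXITED(status) else -1)

    spawn_and_watch('true', lambda code: print('child exited with code', code))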
{"api_name": "os.P_NOWAIT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "children.register_child", "line_number": 59, "usage_type": "call"}, {"api_name": "rox.basedir.load_first_config", "line_number": 63, "usage_type": "call"}, {"api_name": "rox.basedir", "line_number": 63, "usage_type": "name"}, {"api_name": "constants.site", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "rox.app_dir", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.spawnlp", "line_number": 66, "usage_type": "call"}, {"api_name": "os.P_NOWAIT", "line_number": 66, "usage_type": "attribute"}, {"api_name": "rox.app_dir", "line_number": 66, "usage_type": "attribute"}, {"api_name": "children.register_child", "line_number": 67, "usage_type": "call"}, {"api_name": "rox.report_exception", "line_number": 69, "usage_type": "call"}, {"api_name": "rox.g.MessageDialog", "line_number": 76, "usage_type": "call"}, {"api_name": "rox.g", "line_number": 76, "usage_type": "name"}, {"api_name": "rox.g.MESSAGE_QUESTION", "line_number": 76, "usage_type": "attribute"}, {"api_name": "rox.g.STOCK_NO", "line_number": 82, "usage_type": "attribute"}, {"api_name": "rox.g", "line_number": 82, "usage_type": "name"}, {"api_name": "rox.g.STOCK_EXECUTE", "line_number": 83, "usage_type": "attribute"}, {"api_name": "rox.g", "line_number": 83, "usage_type": "name"}, {"api_name": "rox.g.STOCK_REFRESH", "line_number": 84, "usage_type": "attribute"}, {"api_name": "rox.g", "line_number": 84, "usage_type": "name"}, {"api_name": "rox.ButtonMixed", "line_number": 85, "usage_type": "call"}, {"api_name": "rox.g.CAN_DEFAULT", "line_number": 86, "usage_type": "attribute"}, {"api_name": "rox.g", "line_number": 86, "usage_type": "name"}, {"api_name": "os.spawnlp", "line_number": 98, "usage_type": "call"}, {"api_name": "os.P_NOWAIT", "line_number": 98, "usage_type": "attribute"}]} +{"seq_id": "500664636", "text": "#Before you run this program. Please satisfy all the requirements and read all of the comments in the program as well.\n#Please make sure that to whomsover you are going to send the message, that number is in your contact list. (it doesn't\n#matter if you are in there contact list or not).\n#Don't forget to change paths.\n#Don't forget to choose proper selenium driver for your operating system and browser. https://www.selenium.dev/downloads/\n\nfrom selenium import webdriver\nfrom time import sleep\nimport pyautogui as gui\nfrom selenium.webdriver.common.keys import Keys\nimport pandas as pdata\nimport random as random_num\nfrom selenium.common.exceptions import NoSuchElementException\n\narray_phone = []\narray_notreached = []\nstring1 = str\nstring2 = str\n\n#Below-mentioned path is for mac. You may change it accordingly for windows.\ndata_file = pdata.read_excel('/Users/user_name/phone_list.xls', 'Sheet1')\n\n# this array can also be used in place of excel file\n# numbers = {'+919999999999', '+919999999999', '+919999999999', '+919999999999'}\n\nnumbers = data_file['Phone']\nnames = data_file['Name']\n\n#Don't forget to choose proper selenium driver for your operating system and browser. https://www.selenium.dev/downloads/\ndriver = webdriver.Chrome(\"/drivers/chrome\")\ndriver.get('https://web.whatsapp.com/')\n\n# give path of jpg file which needs to be sent\nfilepath = input('Enter your filepath (images/video): ')\n\n#it will keep on waiting for your to press enter. 
Because sometimes scanning QR code takes time.\ninput('Enter anything after scanning QR code')\n\nfor i, n in zip(numbers, names):\n\n try:\n user = driver.find_element_by_class_name('_13NKt') #this class name changes after a few days by whatsapp so\n # I suggest you to please refer the readme for getting value of it.\n user.send_keys(i)\n user.send_keys(Keys.RETURN)\n\n attachment_box = driver.find_element_by_xpath('//div[@title = \"Attach\"]')\n attachment_box.click()\n\n image_box = driver.find_element_by_xpath(\n '//input[@accept=\"image/*,video/mp4,video/3gpp,video/quicktime\"]')\n image_box.send_keys(filepath)\n sleep(random_num.randint(5, 10))\n\n user2 = driver.find_element_by_class_name('_13NKt') #this class name changes after a few days by whatsapp so\n # I suggest you to please refer the readme for getting value of it.\n user2.send_keys('Hello ' + n + ' Any Caption for the image')\n send_button = driver.find_element_by_xpath('//span[@data-icon=\"send\"]')\n send_button.click()\n #I have added random sleep and refresh to stop your account from getting blocked and whatsapp should not\n # get to know about this automation.\n sleep(random_num.randint(10, 15))\n driver.refresh()\n sleep(random_num.randint(20, 45))\n\n except NoSuchElementException:\n array_phone.append(i)\n\n try:\n #In some cases there is a problem that the phone number is not in contact list of yours. Then the number\n # stays in search bar. So this option is to clear the occupied space.\n user = driver.find_element_by_class_name('_13NKt') #this class name changes after a few days by whatsapp so\n # I suggest you to please refer the readme for getting value of it.\n user.clear()\n i = 0\n driver.refresh()\n continue\n except NoSuchElementException:\n array_notreached.append(i)\n\nprint(\"Numbers not on whatsapp\")\nfor m in array_phone:\n print(m)\n\n\nprint(\"Numbers not reachable due to network issue\")\nfor nr in array_notreached:\n print(nr)", "sub_path": "whatsapp-message-image-sender.py", "file_name": "whatsapp-message-image-sender.py", "file_ext": "py", "file_size_in_byte": 3631, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pandas.read_excel", "line_number": 21, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 30, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 30, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.RETURN", "line_number": 45, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 45, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 53, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 62, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 64, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 66, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 78, "usage_type": "name"}]} +{"seq_id": "229155348", "text": "import datetime\n\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\n\nfrom messaging.models import *\n\n\nTODAY = datetime.date.today()\nclass NotificationTests(TestCase):\n @classmethod\n def 
setUpClass(cls):\n return super().setUpClass()\n\n @classmethod\n def setUpTestData(cls):\n cls.usr = User.objects.create_user(username=\"someone\")\n cls.notification = Notification.objects.create(\n user=cls.usr,\n title='Title',\n message=\"Message\"\n )\n def setUp(self):\n pass\n\n def test_create_notification(self):\n obj = Notification.objects.create(\n user=self.usr,\n title='Title',\n message=\"Message\"\n )\n\n self.assertIsInstance(obj, Notification)\n\n def test_open_notification(self):\n self.assertFalse(self.notification.read)\n self.notification.open()\n self.assertTrue(self.notification.read)\n\n\n\nclass ChatTests(TestCase):\n @classmethod\n def setUpClass(cls):\n return super().setUpClass()\n\n @classmethod\n def setUpTestData(cls):\n cls.usr = User.objects.create_user(username=\"someone\")\n cls.receiver = User.objects.create_user(username=\"sometwo\")\n\n cls.chat = Chat.objects.create(\n sender=cls.usr,\n receiver=cls.receiver\n )\n \n def setUp(self):\n pass\n\n def test_create_chat(self):\n obj = Chat.objects.create(\n sender=self.usr,\n receiver=self.receiver\n )\n self.assertIsInstance(obj, Chat)\n\n def test_user_chats(self):\n self.assertTrue(Chat.user_chats(self.usr).count() > 0)\n\n def test_chat_messages(self):\n bubble = Bubble.objects.create(\n sender=self.usr,\n message_text = 'Hello world',\n chat=self.chat\n )\n\n self.assertEqual(self.chat.messages.first().pk, bubble.pk)\n\nclass GroupTests(TestCase):\n @classmethod\n def setUpClass(cls):\n return super().setUpClass()\n\n @classmethod\n def setUpTestData(cls):\n cls.usr = User.objects.create_user(username=\"someone\")\n cls.receiver = User.objects.create_user(username=\"sometwo\")\n\n cls.group = Group.objects.create(\n admin=cls.usr,\n )\n cls.group.participants.add(cls.usr)\n cls.group.participants.add(cls.receiver)\n\n \n def setUp(self):\n pass\n\n def test_create_group(self):\n obj = Group.objects.create(\n admin=self.usr,\n name='Group B'\n )\n\n self.assertIsInstance(obj, Group)\n\n def test_group_messages(self):\n bubble = Bubble.objects.create(\n sender=self.usr,\n message_text = 'Hello world',\n group=self.group\n )\n\n self.assertEqual(self.group.messages.first().pk, bubble.pk)\n\nclass BubbleTests(TestCase):\n @classmethod\n def setUpClass(cls):\n return super().setUpClass()\n\n @classmethod\n def setUpTestData(cls):\n cls.usr = User.objects.create_user(username=\"someone\")\n cls.receiver = User.objects.create_user(username=\"sometwo\")\n\n cls.chat = Chat.objects.create(\n sender=cls.usr,\n receiver=cls.receiver\n )\n\n cls.group = Group.objects.create(\n admin=cls.usr,\n )\n\n cls.bubble = Bubble.objects.create(\n sender=cls.usr,\n message_text = \"Hi\",\n chat=cls.chat,\n )\n\n def test_create_chat_bubble(self):\n obj = Bubble.objects.create(\n sender=self.usr,\n message_text = \"Hi\",\n chat=self.chat,\n )\n\n self.assertIsInstance(obj, Bubble)\n\n\n def test_create_group_bubble(self):\n obj = Bubble.objects.create(\n sender=self.usr,\n message_text = \"Hi\",\n group=self.group,\n )\n\n self.assertIsInstance(obj, Bubble)\n\nclass EmailTests(TestCase):\n @classmethod\n def setUpClass(cls):\n return super().setUpClass()\n\n @classmethod\n def setUpTestData(cls):\n cls.usr = User.objects.create_user(username=\"someone\")\n cls.from_address = EmailAddress.objects.create(address='test@email.com')\n cls.to_address = EmailAddress.objects.create(address='to@email.com')\n\n cls.profile = UserProfile.objects.create(\n user=cls.usr,\n email_address='test@email.com',\n email_password='gibberish',\n 
)\n\n        cls.email = Email.objects.create(\n            created_timestamp=datetime.datetime.now(),\n            sent_from=cls.from_address,\n            to=cls.to_address,\n            server_id='1',\n            body='Hello world',\n            subject='subject',\n        )\n\n    def test_create_email_address(self):\n        obj = EmailAddress.objects.create(address='email@address.com')\n        self.assertIsInstance(obj, EmailAddress)\n\n    def test_create_user_profile(self):\n        self.assertIsInstance(self.profile, UserProfile)\n\n    def test_create_email(self):\n        obj = Email.objects.create(\n            sent_from=self.from_address,\n            to=self.to_address,\n            created_timestamp=datetime.datetime.now(),\n            server_id='123',\n            body='Hello world
',\n subject='subject',\n )\n self.assertIsInstance(obj, Email)\n\n def test_get_email_address_existing(self):\n self.assertIsInstance(\n EmailAddress.get_address('test@email.com'), \n EmailAddress)\n\n def test_get_email_address_new(self):\n self.assertIsInstance(\n EmailAddress.get_address('test99@email.com'), \n EmailAddress)", "sub_path": "messaging/tests/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 5689, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "datetime.date.today", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.test.TestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 17, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 42, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 49, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 50, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 50, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 79, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 86, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 86, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 87, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 87, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 116, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 123, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 123, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 123, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 124, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 124, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 124, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 160, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 167, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 167, "usage_type": "name"}, {"api_name": 
"datetime.datetime.now", "line_number": 178, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 178, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 198, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 198, "usage_type": "attribute"}]} +{"seq_id": "368651260", "text": "#!/usr/bin/python3\n# GIGASECOND ANNIVERSARY\n# LANGUAGE: PYTHON\n\n# Write a function that will calculate the date that someone will celebrate their 1 gigasecond anniversary.\n\n# Note: A gigasecond is one billion (10**9) seconds.\n\n# The input is three parameters representing someone's birthday.\n\n# As a convenience for celebration planning, the function should also calculate the day of the week and the number of days from today.\n\n# The output should be an array formatted as such: [\"YYYY-MM-DD\", 'day_of_the_week', days_until]\n\n# Examples:\n\n# gigasecond(1988, 5, 15) # [\"2020-01-22\", \"Wednesday\", \"1764 days left\"]\n# gigasecond(2015, 2, 17) # [\"2046-10-26\", \"Friday\", \"11538 days left\"]\nfrom datetime import date, timedelta\nfrom calendar import weekday\n\n\ndef gigasecond(byear, bmonth, bday):\n res=[]\n bd = date(byear, bmonth, bday)\n duration = timedelta(seconds=10**9)\n resd = bd + duration\n res.append(resd.isoformat())\n res.append(['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'][weekday(resd.year, resd.month, resd.day)])\n res.append(\"{} {}\".format((resd-date.today()).days , 'days left'))\n print(res)\n\ndef main():\n year=int(input(\"Please enter your birth year\"))\n month=int(input(\"Please enter your birth month\"))\n day=int(input(\"Please enter your birth day\"))\n gigasecond(year, month, day)\n\nif __name__ == \"__main__\": main()\n", "sub_path": "gigasecond.py", "file_name": "gigasecond.py", "file_ext": "py", "file_size_in_byte": 1389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "datetime.date", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 26, "usage_type": "call"}, {"api_name": "calendar.weekday", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "37747324", "text": "from unl.labeller import *\nimport unl.models as models\nimport cqlengine as ce\nimport currenv\nfrom datetime import datetime\nfrom unl import constants\n\n\ndef create_attribute(attr_type, name, description, choices=None):\n attr = models.AttributeModel.objects(name=name).allow_filtering().first()\n atm = models.AttributeTypeModel.objects(name=attr_type).allow_filtering().first()\n if atm is None:\n raise ValueError('Invalid attribute type')\n if attr is None:\n attr = models.AttributeModel.create(name=name, \n description=description, \n attribute_type_id=atm.id,\n created_at=datetime.now())\n if attr.attribute_type_id != atm.id:\n raise ValueError('Attribute exists with name \"%s\" but the type is not consistent' % name)\n if attr.description != description:\n raise ValueError('Attribute exists with name \"%s\" but the description is not consistent' % name)\n\n return attr\n\ndef create_namespace(name, description):\n ns = models.AnnotationNamespaceModel.objects(name=name).allow_filtering().first()\n if ns is None:\n ns = models.AnnotationNamespaceModel.create(name=name, \n description=description,\n created_at=datetime.now())\n if 
ns.description != description:\n raise ValueError('Namespace exists with name \"%s\" but the description is not consistent' % name)\n\n return ns\n \ndef label_attribute(usls, attr, namespace, labeller):\n existing = models.AttributeValueModel.objects(attribute_id=attr.id, namespace_id=namespace.id).allow_filtering()\n atm = models.AttributeTypeModel.objects(id=attr.attribute_type_id).allow_filtering().first()\n\n for usls_seq in usls:\n values = labeller.label(usls_seq)\n for usl,v in zip(usls_seq,values):\n try:\n av = existing.get(usl_id=usl.id)\n if av.value != v:\n raise ValueError('Inconsistent values')\n except ce.query.DoesNotExist:\n m = models.attr_type_to_model[atm.name]\n av = m.create(attribute_id=attr.id, \n namespace_id=namespace.id, \n usl_id=usl.id,\n value=v)\n except ce.query.MultipleObjectsReturned:\n raise ValueError('There are multiple objects')\n pass\n\n\nif __name__ == '__main__':\n\n env = currenv.CurrentEnvironment()\n conn = ce.connection.setup([env.cassandra_ip], env.cassandra_dev_keyspace, port=env.cassandra_port)\n roots = models.USLModel.get_roots()\n usls = [child.get_flattened_children() for root in roots for child in root.get_child_entries()]\n\n ns = create_namespace('master', 'The master namespace. The master branch should only contain factual annotations.')\n\n ########\n _name = 'stimulus'\n attr = create_attribute(constants._type_text, _name, 'The string that identifies the stimulus')\n label_attribute(usls, attr, ns, AutomaticStimulusLabeller())\n \n\n #############\n _name = 'lower_alphanumeric_only'\n attr = create_attribute(constants._type_text, _name, 'The alphanumeric characters in the stimulus in lower case')\n label_attribute(usls, attr, ns, AutomaticLowerAlphaNumericOnlyLabeller())\n\n\n '''\n NUM_OF_CHARACHTERS\n '''\n '''\n _name = 'num_of_characters'\n attr = create_attribute(constants._type_integer, \n _name, \n 'the number of characters in the presented stimulus, including punctuation')\n label_attribute(usls, attr, ns, AutomaticNumOfCharLabeller())\n '''\n\n '''\n AUTOMATIC_POS\n '''\n ns = create_namespace('automatic', 'Namespace of labels that were computed automatically (e.g. 
pos tag)')\n _name = 'pos'\n attr = create_attribute(constants._type_text, \n _name, \n 'automatic part of speech tags')\n label_attribute(usls, attr, ns, AutomaticPOSLabeller())\n\n pass\n", "sub_path": "label_unlabelled.py", "file_name": "label_unlabelled.py", "file_ext": "py", "file_size_in_byte": 3624, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "unl.models.AttributeModel.objects", "line_number": 10, "usage_type": "call"}, {"api_name": "unl.models.AttributeModel", "line_number": 10, "usage_type": "attribute"}, {"api_name": "unl.models", "line_number": 10, "usage_type": "name"}, {"api_name": "unl.models.AttributeTypeModel.objects", "line_number": 11, "usage_type": "call"}, {"api_name": "unl.models.AttributeTypeModel", "line_number": 11, "usage_type": "attribute"}, {"api_name": "unl.models", "line_number": 11, "usage_type": "name"}, {"api_name": "unl.models.AttributeModel.create", "line_number": 15, "usage_type": "call"}, {"api_name": "unl.models.AttributeModel", "line_number": 15, "usage_type": "attribute"}, {"api_name": "unl.models", "line_number": 15, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "unl.models.AnnotationNamespaceModel.objects", "line_number": 27, "usage_type": "call"}, {"api_name": "unl.models.AnnotationNamespaceModel", "line_number": 27, "usage_type": "attribute"}, {"api_name": "unl.models", "line_number": 27, "usage_type": "name"}, {"api_name": "unl.models.AnnotationNamespaceModel.create", "line_number": 29, "usage_type": "call"}, {"api_name": "unl.models.AnnotationNamespaceModel", "line_number": 29, "usage_type": "attribute"}, {"api_name": "unl.models", "line_number": 29, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "unl.models.AttributeValueModel.objects", "line_number": 38, "usage_type": "call"}, {"api_name": "unl.models.AttributeValueModel", "line_number": 38, "usage_type": "attribute"}, {"api_name": "unl.models", "line_number": 38, "usage_type": "name"}, {"api_name": "unl.models.AttributeTypeModel.objects", "line_number": 39, "usage_type": "call"}, {"api_name": "unl.models.AttributeTypeModel", "line_number": 39, "usage_type": "attribute"}, {"api_name": "unl.models", "line_number": 39, "usage_type": "name"}, {"api_name": "cqlengine.query", "line_number": 48, "usage_type": "attribute"}, {"api_name": "unl.models.attr_type_to_model", "line_number": 49, "usage_type": "attribute"}, {"api_name": "unl.models", "line_number": 49, "usage_type": "name"}, {"api_name": "cqlengine.query", "line_number": 54, "usage_type": "attribute"}, {"api_name": "currenv.CurrentEnvironment", "line_number": 61, "usage_type": "call"}, {"api_name": "cqlengine.connection.setup", "line_number": 62, "usage_type": "call"}, {"api_name": "cqlengine.connection", "line_number": 62, "usage_type": "attribute"}, {"api_name": "unl.models.USLModel.get_roots", "line_number": 63, "usage_type": "call"}, {"api_name": "unl.models.USLModel", "line_number": 63, "usage_type": "attribute"}, {"api_name": "unl.models", "line_number": 63, "usage_type": "name"}, {"api_name": "unl.constants._type_text", "line_number": 70, "usage_type": "attribute"}, {"api_name": "unl.constants", "line_number": 70, "usage_type": "name"}, {"api_name": 
"unl.constants._type_text", "line_number": 76, "usage_type": "attribute"}, {"api_name": "unl.constants", "line_number": 76, "usage_type": "name"}, {"api_name": "unl.constants._type_text", "line_number": 96, "usage_type": "attribute"}, {"api_name": "unl.constants", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "584206798", "text": "import matplotlib.pyplot as plt\nimport signal_generator\nimport complexity_generator\nimport statistics_utils\n\nHARMONICS = 8\nFREQUENCY = 1100\nDISCRETE_CALLS = 256\n\nsignal_first = signal_generator.generate_signal(\n HARMONICS,\n FREQUENCY,\n DISCRETE_CALLS\n)\nsignal_second = signal_generator.generate_signal(\n HARMONICS,\n FREQUENCY,\n DISCRETE_CALLS\n)\n\nauto_correlation_signal = statistics_utils.auto_correlation(signal_first)\ncross_correlation_signal = statistics_utils.cross_correlation(signal_first, signal_second)\n\nauto_correlation_time = complexity_generator.calculate_time_correlation(\n HARMONICS,\n FREQUENCY,\n DISCRETE_CALLS,\n 'auto'\n)\nprint('auto_correlation_time', auto_correlation_time)\n\ncross_correlation_time = complexity_generator.calculate_time_correlation(\n HARMONICS,\n FREQUENCY,\n DISCRETE_CALLS,\n 'cross'\n)\nprint('cross_correlation_time', cross_correlation_time)\n\nfig, (((ax00), (ax01)), ((ax10), (ax11))) = plt.subplots(2, 2)\nfig.suptitle('Laboratorka 1.2')\nfig.set_size_inches(18.5, 10.5)\n\nax00.plot(signal_first)\nax00.set_title('Generate first signal')\nax00.set(xlabel='time', ylabel='generated signal')\n\nax01.plot(signal_second)\nax01.set_title('Generate second signal')\nax01.set(xlabel='time', ylabel='generated signal')\n\nax10.plot(auto_correlation_signal)\nax10.set_title('Autocorrelation of first signal')\nax10.set(xlabel='tau', ylabel='correlation')\n\nax11.plot(cross_correlation_signal)\nax11.set_title('Crosscorrelation of first and second signals')\nax11.set(xlabel='tau', ylabel='correlation')\n\nfig.savefig('lab1_2.png')\n\nplt.show()\n", "sub_path": "lab1/lab1_2.py", "file_name": "lab1_2.py", "file_ext": "py", "file_size_in_byte": 1564, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "signal_generator.generate_signal", "line_number": 10, "usage_type": "call"}, {"api_name": "signal_generator.generate_signal", "line_number": 15, "usage_type": "call"}, {"api_name": "statistics_utils.auto_correlation", "line_number": 21, "usage_type": "call"}, {"api_name": "statistics_utils.cross_correlation", "line_number": 22, "usage_type": "call"}, {"api_name": "complexity_generator.calculate_time_correlation", "line_number": 24, "usage_type": "call"}, {"api_name": "complexity_generator.calculate_time_correlation", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "391330278", "text": "import praw\n\nr = praw.Reddit(user_agent='top_5_bot by u/bnsly')\nsubreddit = input(\"What subreddit would you like to check? \")\nsubmissions = r.get_subreddit(subreddit).get_top(limit=5)\ncount = 1\nprint(\"The top 5 posts from /r/{}\".format(subreddit))\nfor post in submissions:\n print((\"{}. 
{} -- <{}>\").format(count, post.title, post.url))\n count += 1\n", "sub_path": "reddit_top_5.py", "file_name": "reddit_top_5.py", "file_ext": "py", "file_size_in_byte": 356, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "praw.Reddit", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "386345446", "text": "from evaluate_multiple_assignments import *\nfrom west.region_united_states import RegionUnitedStates\nfrom west.ruleset_fcc2012 import RulesetFcc2012\nfrom west.boundary import BoundaryContinentalUnitedStates, BoundaryContinentalUnitedStatesWithStateBoundaries\nimport west.data_management\nimport data_management_vidya\nimport west.device\n\nimport protected_entities_tv_stations_vidya\n\nimport pickle\nimport numpy\nimport os\nimport csv\n\"\"\"The entire contents of this file is something like a main function. We are using all the functions from evaluate_multiple_assignments.\"\"\"\n\n\"\"\"Define region, as well as ruleset.\"\"\"\n\ndef not_function(latitude, longitude, latitude_index, longitude_index, current_value):\n return 1 - current_value\n\n\n\nregion = RegionUnitedStates()\nruleset = RulesetFcc2012()\nall_fids_considered_in_auction = []\nwith open(\"all_facility_ids_considered_in_auction.csv\", 'rU') as f:\n reader = csv.reader(f)\n for row in reader:\n all_fids_considered_in_auction.append(row[0])\n\n\n#Hack-y, done because location within boundary does not work with BoundaryContinentalUnitedStates.\nboundary = BoundaryContinentalUnitedStatesWithStateBoundaries()\nregion._boundary = boundary\n\n\n\"\"\"Define set of valid TVWS (and TV) channels.\"\"\"\ntvws_channel_list = region.get_tvws_channel_list()\n\n#Hack-y, done because channels 3 and 4 are being counted as WS (although this is not confirmed yet.)\ntvws_channel_list.append(3)\ntvws_channel_list.append(4)\ntvws_channel_list = sorted(tvws_channel_list)\n\ntv_channel_list = copy(tvws_channel_list)\n\n\n\"\"\"Load submaps.\"\"\"\ndef load_submaps(buffersize):\n with open(\"stamps_with_buffer=%dkm.pkl\"%buffersize, 'r') as f:\n stamps = pickle.load(f)\n\n return stamps\n\n\nbuffers = [0, 1.8, 4, 14.3]\nsubmaps = {}\n\nfor buffersize in buffers:\n submaps[buffersize] = load_submaps(buffersize)\n\nsubmaps_ws = {'fixed': {'cochannel': submaps[14.3], 'adjchannel': submaps[1.8]}, 'portable': {'cochannel': submaps[4], 'adjacent': []}}\nsubmaps_tv = submaps[0]\n\n#Because submaps by default have 1 for whitespace and 0 for excluded areas. 
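not_function above (applied through the update_all_values_via_function loop just below) flips a 0/1 whitespace map into a TV exclusion mask. Since the DataMap2D internals are not shown in this record, here is the equivalent operation on a bare numpy array, purely for reference:

    import numpy as np

    whitespace = np.array([[1, 0, 1],
                           [0, 1, 0]])   # 1 = whitespace, 0 = excluded
    excluded = 1 - whitespace            # matches not_function's 1 - current_value
    print(excluded)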
For TV submaps, we want this the other way round.\nfor submap_tv in submaps_tv.values():\n submap_tv[1].update_all_values_via_function(not_function)\n\n\n\"\"\"Define all fundamental data maps that will be used.\"\"\"\ndatamap_spec = west.data_management.SpecificationDataMap(west.data_map.DataMap2DContinentalUnitedStates, 400, 600)\nis_in_region_map_spec = west.data_management.SpecificationRegionMap(BoundaryContinentalUnitedStates, datamap_spec)\nis_in_region_map = is_in_region_map_spec.fetch_data()\npopulation_map_spec = west.data_management.SpecificationPopulationMap(is_in_region_map_spec, west.population.PopulationData)\npopulation_map = population_map_spec.fetch_data()\nzero_map = west.data_map.DataMap2DContinentalUnitedStates.get_copy_of(is_in_region_map)\nzero_map.reset_all_values(0)\nplmrs_exclusions_map_spec = data_management_vidya.SpecificationPLMRSMap(is_in_region_map_spec, region, ruleset)\nplmrs_exclusions_map = plmrs_exclusions_map_spec.fetch_data()\n\n\"\"\"plmrs_file_path = \"data/plmrs_exclusions_map.pcl\"\nif os.path.isfile(plmrs_file_path):\n plmrs_exclusions_map = west.data_map.DataMap3D.from_pickle(plmrs_file_path)\nelse:\n plmrs_exclusions_map = createPLMRSExclusionsMap(region, tvws_channel_list, is_in_region_map, ruleset)\n plmrs_exclusions_map.to_pickle(plmrs_file_path)\"\"\"\n\n\n\"\"\"Define tunable parameters (device specifications, assignment types, etc)\"\"\"\n\ndevice_specification = DeviceSpecification.portable\nif device_specification == DeviceSpecification.fixed:\n device = west.device.Device(0, 30, 1) #Fixed, HAAT = 30, has geolocation.\nelse:\n device = west.device.Device(1, 30, 1)\n\nplmrs_exclusions_applied = PLMRSExclusions.plmrs_applied\nwhitespace_evaluation_type = WhitespaceEvaluationType.total\nquantity_to_evaluate = QuantityToPlot.whitespace\n\nif quantity_to_evaluate == QuantityToPlot.whitespace:\n pareto_filename = \"pareto_curve_data_ws_portable.csv\"\nelif quantity_to_evaluate == QuantityToPlot.tv:\n pareto_filename = \"pareto_curve_data_tv.csv\"\nelse:\n raise ValueError(\"Invalid quantity to evaluate entered. 
Please check input.\")\n\n\nset_of_bandplans = range(1, 38)\nnum_repacks_considered = 100\nassignment_type = AssignmentType.chopofftop\n\n\ncreate_pareto_data = True\n\"\"\"with open(os.path.join(\"data\", \"pareto_curve_data_tv.csv\"), 'w') as f:\n writer = csv.writer(f)\n writer.writerow([])\"\"\"\nupdate_old_entries = True\n\n\n\"\"\"Some miscellaneous functions.\"\"\"\ndef median_function(latitude, longitude, latitude_index, longitude_index, list_of_values_in_order):\n return numpy.median(list_of_values_in_order)\n\ndef stddev_function(latitude, longitude, latitude_index, longitude_index, list_of_values_in_order):\n return numpy.std(list_of_values_in_order)\n\ndef mean_function(latitude, longitude, latitude_index, longitude_index, list_of_values_in_order):\n return numpy.mean(list_of_values_in_order)\n\n\n\"\"\"Performing actual whitespace and TV evaluation.\"\"\"\n\n\nif assignment_type == AssignmentType.original:\n assignment = Assignment(assignment_type, quantity_to_evaluate,\n region, is_in_region_map_spec, plmrs_exclusions_map, ruleset,\n tv_channel_list, submaps_ws,\n 0, device = device, device_specification = device_specification, whitespace_evaluation_type = whitespace_evaluation_type,\n plmrs_indicator= plmrs_exclusions_applied)\n assignment.set_region(all_fids_considered_in_auction)\n #Evaluating whitespace/TV map\n if update_old_entries:\n total_map = assignment.make_data()\n else:\n total_map = assignment.fetch_data()\n cdf = west.data_manipulation.calculate_cdf_from_datamap2d(total_map, population_map, is_in_region_map)\n if create_pareto_data:\n assignment.write_entry_to_pareto_file(pareto_filename, cdf[2], cdf[3])\n\n\nif assignment_type == AssignmentType.repack:\n\n for n in set_of_bandplans:\n all_maps = west.data_map.DataMap3D.from_DataMap2D(is_in_region_map, range(num_repacks_considered))\n for i in range(num_repacks_considered):\n repack = Repack(RepackType.C, i)\n assignment = Assignment(assignment_type, quantity_to_evaluate,\n region, is_in_region_map_spec, plmrs_exclusions_map, ruleset,\n tv_channel_list, submaps_ws,\n n, repack = repack, device = device, device_specification = device_specification, whitespace_evaluation_type = whitespace_evaluation_type,\n plmrs_indicator = plmrs_exclusions_applied)\n assignment.set_region(all_fids_considered_in_auction)\n\n #Evaluating map\n if update_old_entries:\n all_maps.set_layer(i, assignment.make_data())\n else:\n all_maps.set_layer(i, assignment.fetch_data())\n\n\n if create_pareto_data:\n cdf = west.data_manipulation.calculate_cdf_from_datamap2d(all_maps.get_layer(i), population_map, is_in_region_map)\n assignment.write_entry_to_pareto_file(pareto_filename, cdf[2], cdf[3])\n\n median_map = all_maps.combine_values_elementwise_across_layers_using_function(median_function)\n mean_map = all_maps.combine_values_elementwise_across_layers_using_function(mean_function)\n stddev_map = all_maps.combine_values_elementwise_across_layers_using_function(stddev_function)\n median_map.to_pickle(os.path.join(\"data\", assignment.subdirectory, \"\".join([assignment.to_string(), \"_median.pcl\"])))\n mean_map.to_pickle(os.path.join(\"data\", assignment.subdirectory, \"\".join([assignment.to_string(), \"_mean.pcl\"])))\n stddev_map.to_pickle(os.path.join(\"data\", assignment.subdirectory, \"\".join([assignment.to_string(), \"_stddev.pcl\"])))\n\nif assignment_type == AssignmentType.chopofftop:\n\n for n in set_of_bandplans:\n assignment = Assignment(assignment_type, quantity_to_evaluate,\n region, is_in_region_map_spec, 
plmrs_exclusions_map, ruleset,\n tv_channel_list, submaps_ws,\n n, device = device, device_specification = device_specification, whitespace_evaluation_type = whitespace_evaluation_type,\n plmrs_indicator = plmrs_exclusions_applied)\n\n assignment.set_region(all_fids_considered_in_auction)\n\n #Evaluating map\n if update_old_entries:\n total_map = assignment.make_data()\n else:\n total_map = assignment.fetch_data()\n if create_pareto_data:\n cdf = west.data_manipulation.calculate_cdf_from_datamap2d(total_map, population_map, is_in_region_map)\n assignment.write_entry_to_pareto_file(pareto_filename, cdf[2], cdf[3])\n\n\n\n\n\n\"\"\"To-dos:\nProofread all code.\nRun.\n\"\"\"\n\n\n\n\n\n\n\n", "sub_path": "evaluate_multiple_assignments_main.py", "file_name": "evaluate_multiple_assignments_main.py", "file_ext": "py", "file_size_in_byte": 8911, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "west.region_united_states.RegionUnitedStates", "line_number": 24, "usage_type": "call"}, {"api_name": "west.ruleset_fcc2012.RulesetFcc2012", "line_number": 25, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 28, "usage_type": "call"}, {"api_name": "west.boundary.BoundaryContinentalUnitedStatesWithStateBoundaries", "line_number": 34, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 52, "usage_type": "call"}, {"api_name": "west.region_united_states.data_management.SpecificationDataMap", "line_number": 72, "usage_type": "call"}, {"api_name": "west.region_united_states.data_management", "line_number": 72, "usage_type": "attribute"}, {"api_name": "west.region_united_states", "line_number": 72, "usage_type": "name"}, {"api_name": "west.region_united_states.data_map", "line_number": 72, "usage_type": "attribute"}, {"api_name": "west.region_united_states.data_management.SpecificationRegionMap", "line_number": 73, "usage_type": "call"}, {"api_name": "west.boundary.BoundaryContinentalUnitedStates", "line_number": 73, "usage_type": "argument"}, {"api_name": "west.region_united_states.data_management", "line_number": 73, "usage_type": "attribute"}, {"api_name": "west.region_united_states", "line_number": 73, "usage_type": "name"}, {"api_name": "west.region_united_states.data_management.SpecificationPopulationMap", "line_number": 75, "usage_type": "call"}, {"api_name": "west.region_united_states.data_management", "line_number": 75, "usage_type": "attribute"}, {"api_name": "west.region_united_states", "line_number": 75, "usage_type": "name"}, {"api_name": "west.region_united_states.population", "line_number": 75, "usage_type": "attribute"}, {"api_name": "west.region_united_states.data_map.DataMap2DContinentalUnitedStates.get_copy_of", "line_number": 77, "usage_type": "call"}, {"api_name": "west.region_united_states.data_map", "line_number": 77, "usage_type": "attribute"}, {"api_name": "west.region_united_states", "line_number": 77, "usage_type": "name"}, {"api_name": "data_management_vidya.SpecificationPLMRSMap", "line_number": 79, "usage_type": "call"}, {"api_name": "west.region_united_states.device.Device", "line_number": 94, "usage_type": "call"}, {"api_name": "west.region_united_states.device", "line_number": 94, "usage_type": "attribute"}, {"api_name": "west.region_united_states", "line_number": 94, "usage_type": "name"}, {"api_name": "west.region_united_states.device.Device", "line_number": 96, "usage_type": "call"}, {"api_name": "west.region_united_states.device", "line_number": 96, "usage_type": 
"attribute"}, {"api_name": "west.region_united_states", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.median", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 130, "usage_type": "call"}, {"api_name": "west.region_united_states.data_manipulation.calculate_cdf_from_datamap2d", "line_number": 148, "usage_type": "call"}, {"api_name": "west.region_united_states.data_manipulation", "line_number": 148, "usage_type": "attribute"}, {"api_name": "west.region_united_states", "line_number": 148, "usage_type": "name"}, {"api_name": "west.region_united_states.data_map.DataMap3D.from_DataMap2D", "line_number": 156, "usage_type": "call"}, {"api_name": "west.region_united_states.data_map", "line_number": 156, "usage_type": "attribute"}, {"api_name": "west.region_united_states", "line_number": 156, "usage_type": "name"}, {"api_name": "west.region_united_states.data_manipulation.calculate_cdf_from_datamap2d", "line_number": 174, "usage_type": "call"}, {"api_name": "west.region_united_states.data_manipulation", "line_number": 174, "usage_type": "attribute"}, {"api_name": "west.region_united_states", "line_number": 174, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "west.region_united_states.data_manipulation.calculate_cdf_from_datamap2d", "line_number": 201, "usage_type": "call"}, {"api_name": "west.region_united_states.data_manipulation", "line_number": 201, "usage_type": "attribute"}, {"api_name": "west.region_united_states", "line_number": 201, "usage_type": "name"}]} +{"seq_id": "508613411", "text": "import torch.nn as nn\nimport torch\nfrom scipy.special import comb\nimport math\nfrom scipy.stats import binom\nimport torch.nn.functional as F\n\n\nfrom torchvision import datasets, transforms\n\nclass JustBCE(nn.Module):\n def __init__(self,alpha=1):\n super(JustBCE, self).__init__()\n self.alpha=alpha\n\n def forward(self, input, target):\n half=(1-target)*torch.log(1-input)\n otherhalf=target*torch.log(input)\n\n ret= half+otherhalf\n ret=-ret.mean()\n\n return ret\n\nclass FakeBCE(nn.Module):\n def __init__(self,alpha=1):\n super(FakeBCE, self).__init__()\n self.alpha=alpha\n\n def forward(self, input, target):\n misclassification=(1-target)*torch.log(input)\n recovery=target*torch.log(input)\n\n ret= misclassification-self.alpha*recovery\n ret=ret.mean()\n # the converges to prediction = 0 or 1\n # it depends on the batch targets.\n return ret\n\n\nclass JustMultiply(nn.Module):\n def __init__(self,alpha=1):\n super(JustMultiply, self).__init__()\n self.alpha=alpha\n\n def forward(self, input, target):\n half=(1-target)*(1-input)\n otherhalf=target*input\n\n ret= half+otherhalf\n ret=-ret.mean()\n\n return ret\n\nclass FakeJustMultiply(nn.Module):\n def __init__(self, alpha=1):\n super(FakeJustMultiply, self).__init__()\n self.alpha = alpha\n\n def forward(self, input, target):\n misclass = (1 - target) * input\n recovery = target * input\n\n ret = misclass-recovery\n ret = ret.mean()\n\n return ret\n\nclass DummyModule(nn.Module):\n def __init__(self):\n 
super(DummyModule, self).__init__()\n        self.weights=torch.Tensor([0.36]*10)\n        self.weights=torch.nn.Parameter(self.weights)\n\n    def forward(self,input):\n        return self.weights+0*input\n\n\n# let's test it\ndef try_fake_BCE():\n\n    def loop():\n        optim.zero_grad()\n        print(dm.weights)\n        outputs = dm.forward(useless_input)\n        loss = dl(outputs, goal)\n        loss2 = bce(outputs, goal)\n        print(\"The loss is\", loss.item())\n        print(\"The BCE loss is\", loss2.item())\n        loss.backward(retain_graph=True)\n        optim.step()\n\n    dl = FakeBCE()\n    bce = torch.nn.BCELoss()\n    dm = DummyModule()\n\n    # the weights are initialised to 0.36 each.\n    # the targets are zeros. This will cause falsification.\n    goal = torch.zeros((10))\n    useless_input = torch.ones((10))\n    optim = torch.optim.Adam(dm.parameters())\n\n    for i in range(100):\n        loop()\n    for i in range(100):\n        loop()\n    for i in range(100):\n        loop()\n\nclass Net(nn.Module):\n    def __init__(self):\n        super(Net, self).__init__()\n        self.conv1 = nn.Conv2d(1, 20, 5, 1)\n        self.conv2 = nn.Conv2d(20, 50, 5, 1)\n        self.fc1 = nn.Linear(4*4*50, 500)\n        self.fc2 = nn.Linear(500, 10)\n\n    def forward(self, x):\n        x = F.relu(self.conv1(x))\n        x = F.max_pool2d(x, 2, 2)\n        x = F.relu(self.conv2(x))\n        x = F.max_pool2d(x, 2, 2)\n        x = x.view(-1, 4*4*50)\n        x = F.relu(self.fc1(x))\n        x = self.fc2(x)\n        return F.log_softmax(x, dim=1)\n#\n# def elaborate_test():\n#     kwargs = {'num_workers': 4, 'pin_memory': True}\n#     train_loader = torch.utils.data.DataLoader(\n#         datasets.MNIST('data', train=True, download=True,\n#                        transform=transforms.Compose([\n#                            transforms.ToTensor(),\n#                            transforms.Normalize((0.1307,), (0.3081,))\n#                        ])),\n#         batch_size=32, shuffle=True, **kwargs)\n#     test_loader = torch.utils.data.DataLoader(\n#         datasets.MNIST('data', train=False, transform=transforms.Compose([\n#             transforms.ToTensor(),\n#             transforms.Normalize((0.1307,), (0.3081,))\n#         ])),\n#         batch_size=32, shuffle=True, **kwargs)\n#\n#     model=Net().cuda()\n#     optimizer=torch.optim.Adam(model.parameters())\n#     model.train()\n#     for batch_idx, (data, target) in enumerate(train_loader):\n#         data,target=data.cuda(),target.cuda()\n#         optimizer.zero_grad()\n#         output = model(data)\n#         criterion=nn.CrossEntropyLoss()\n#         loss=criterion(output,target)\n#         loss.backward()\n#         optimizer.step()\n#         if batch_idx % 10 == 0:\n#             print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n#                 1, batch_idx * len(data), len(train_loader.dataset),\n#                 100. * batch_idx / len(train_loader), loss.item()))
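\n# Added note (not in the original source): elaborate_test() above is entirely\n# commented out, so the entry point below runs the try_fake_BCE() demo instead.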
\nif __name__ == '__main__':\n    try_fake_BCE()", "sub_path": "AB_2/fakeBCE.py", "file_name": "fakeBCE.py", "file_ext": "py", "file_size_in_byte": 4618, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.log", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.log", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.nn.BCELoss", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 101, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 110, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 119, "usage_type": "name"}, {"api_name": "torch.nn.functional.max_pool2d", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.nn.functional.max_pool2d", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 124, "usage_type": "name"}, {"api_name": 
"torch.nn.functional.log_softmax", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 126, "usage_type": "name"}]} +{"seq_id": "182097146", "text": "from distutils.core import setup\nfrom setuptools import find_packages\n\ninstall_requires = ['requests', 'configobj>4.7.1']\n\nsetup(\n name='workbox-cli',\n version='0.1.5',\n author='Sam Tardif',\n author_email='sam.tardif@gmail.com',\n packages=find_packages(),\n url='http://bitbucket.org/samtardif/workbox-cli',\n license='MIT',\n description='A command line interface for Confluence ToolBox',\n long_description=file('README.txt').read(),\n entry_points = {'console_scripts': ['workbox = workboxcli:main']},\n install_requires = install_requires,\n zip_safe=False,\n)\n", "sub_path": "pypi_install_script/workbox-cli-0.1.5.tar/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 593, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "distutils.core.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "118638924", "text": "import soco\nfrom texttable import Texttable\nimport sys\nimport logging\n\n\nclass Discover(object):\n \"\"\"\n Discover and link nearby sonos units\n \"\"\"\n\n _zones = None\n\n def __init__(self,zone_ip=None):\n self.__zone_ip = zone_ip\n\n def printZones(self):\n t = Texttable()\n t.add_row(['Zone Name', 'UID', 'Group', 'IP', 'Current Volume'])\n\n for z in self.zones:\n t.add_row([z.player_name, z.uid, z.group.coordinator.player_name, z.ip_address, z.volume])\n\n sys.stdout.write(t.draw())\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n\n def selectZone(self, zoneName):\n self.__selectedZone = None\n for z in self.zones:\n if z.player_name == zoneName:\n self.__selectedZone = z\n\n if not self.__selectedZone:\n raise Exception(\"Zone named %s is not found!\" % (zoneName))\n\n if self.__selectedZone.group.coordinator != self.__selectedZone:\n self.__selectedZone = self.__selectedZone.group.coordinator\n logging.warning(\"Zone %s is part of a group (%s). Target zone should be the coordinator node\" % (self.__selectedZone.player_name, self.__selectedZone.group.coordinator.player_name))\n\n\n @property\n def zones(self):\n if not self.__zone_ip:\n if not self._zones:\n self._zones = soco.discover(timeout=5)\n else:\n logging.info(\"Discovering with custom IP\")\n self._zones = [soco.SoCo(self.__zone_ip)]\n return self._zones\n\n @property\n def groupMaster(self):\n return self.groupZones[0].group.coordinator\n\n __groupZones = None\n @property\n def groupZones(self):\n if not self.__groupZones:\n self.__groupZones = []\n\n # FUN FACT! Asking for the player name or UID sends a GET request to Sonos, every. 
time.\n # I have no idea why\n for z in self.__selectedZone.group.members:\n z._uid = str(z.uid)\n z._player_name = str(z.player_name)\n self.__groupZones.append(z)\n return self.__groupZones\n\n @property\n def settings(self):\n settings = {\n 'volume':{},\n 'current_queue_position':int(self.groupMaster.get_current_track_info()['playlist_position']),\n 'current_playing_state':self.groupMaster.get_current_transport_info()['current_transport_state']\n }\n\n for z in self.groupZones:\n settings['volume'][z.uid] = int(z.volume)\n return settings\n", "sub_path": "sonosalarm/discovery.py", "file_name": "discovery.py", "file_ext": "py", "file_size_in_byte": 2538, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "texttable.Texttable", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 26, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 39, "usage_type": "call"}, {"api_name": "soco.discover", "line_number": 46, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 48, "usage_type": "call"}, {"api_name": "soco.SoCo", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "160365071", "text": "#!/bin/env python\n\nimport argparse\nimport os\nimport os.path\nfrom os import path\nimport glob\n\nparser = argparse.ArgumentParser(description='Description of your program')\nparser.add_argument('--length', help = \"chunck length\", required=True)\nargs = vars(parser.parse_args())\n\nchunck = args['length']\n\ncmd = \"sed -i '1d' *.fasta\"\nos.system(cmd)\n\ny=1\n\nListFile=[f for f in os.listdir('.')]\n\nfor x in range (1, len(ListFile)):\n\ty=1\n\tif (path.exists(\"pilonSRctg\"+str(x)+\".fasta\")):\n\t\ty=1\n\telse:\n\t\t\twith open(\"pilonSRctg\"+str(x)+\".fasta\",\"a\") as a1:\n\t\t\t\twhile (glob.glob(\"pilonSRctg\"+str(x)+\":\"+str(y)+\"-*.fasta\")):\n\t\t\t\t\twith open(glob.glob(\"pilonSRctg\"+str(x)+\":\"+str(y)+\"-*.fasta\")[0],\"r\") as a:\n\t\t\t\t\t\tfor line in a:\n\t\t\t\t\t\t\ta1.write(line)\n\t\t\t\t\t\ta.close()\n\t\t\t\t\t\ty+=int(chunck)\n\t\t\t\ta1.close()\n\nfor x in range(1, len(ListFile)):\n\tif (path.exists(\"pilonSRctg\"+str(x)+\".fasta\")):\n\t\twith open(\"pilonSRctg\"+str(x)+\".fasta\", 'r') as f:\n\t\t\tLenFile=0\n\t\t\tfor line in f:\n\t\t\t\t\tLenFile+=len(line.replace(\"\\n\",\"\"))\n\t\t\tf.close()\n\t\twith open(\"pilonSRctg\"+str(x)+\".fasta\", 'r') as fx:\n\t\t\twith open(\"pilonOut.fa\",\"a\") as f1:\n\t\t\t\tif(LenFile > 0):\n\t\t\t\t\tf1.write(\"\\n>ctg\"+str(x)+\" len=\"+str(LenFile)+\"\\n\")\n\t\t\t\t\tfor line in fx:\n\t\t\t\t\t\tf1.write(line.replace(\"\\n\",\"\"))\n\t\t\tf1.close()\n\t\tfx.close()\n\ncmd = \"sed -i '1d' pilonOut.fa\"\nos.system(cmd)\n", "sub_path": "bin/FastaConcat.py", "file_name": "FastaConcat.py", "file_ext": "py", "file_size_in_byte": 1313, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "os.system", "line_number": 16, "usage_type": "call"}, {"api_name": "os.listdir", 
"line_number": 20, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 28, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "name"}, {"api_name": "os.system", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "276284637", "text": "import numpy as np\nimport pyhrv\nfrom .. import utils\nimport json\n\n\ndef nonlinear_geo_features(flag, signal):\n \"\"\"Compute non-linear and geometric characteristic metrics describing the signal.\n\n Parameters\n ----------\n sampling_rate : flag\n Events location indices.\n signal : array\n Input signal.\n Returns\n ------- sd1, sd2, sd12, poincarea, sample_entropy, dfa_alpha1, dfa_alpha2, tinn_n, tinn_m, tinn, triangular_index\n sd1 : float\n Standard deviation of the major axis in the Poincaré Plot.\n\n sd2 : float\n Standard deviation of the minor axis in the Poincaré Plot.\n\n sd12 : float\n Ratio between SD2 and SD1 (SD2/SD1).\n\n poincarea : float\n Area of the Poincaré Plot fitted ellipse.\n\n sample_entropy : float\n Sample entropy of the NNI series.\n\n dfa_alpha1 : float\n Alpha value of the short term Detrended Fluctuation Analysis of an NNI series.\n\n dfa_alpha2 : float\n Alpha value of the long term Detrended Fluctuation Analysis of an NNI series.\n\n tinn_n : float\n N value of the TINN computation.\n\n tinn_m : float\n M value of the TINN computation.\n\n tinn : float\n Baseline width of the NNI histogram based on the triangular Interpolation (TINN).\n\n triangular_index : float\n Ratio between the total number of NNIs and the maximum of the NNI histogram distribution.\n\n References\n ----------\n Gomes, Pedro & Margaritoff, Petra & Plácido da Silva, Hugo. (2019). 
pyHRV: Development and Evaluation of an Open-Source Python Toolbox for Heart Rate Variability (HRV).\n\n \"\"\"\n signal = np.array(signal)\n dict = json.load(open('nonlinear_geo_features_log.json'))\n args, names = [], []\n\n try:\n flag_int = signal[pyhrv.tools.nn_intervals(flag)].astype(np.float)\n except:\n flag_int = None\n\n # Non-Linear features\n try:\n _, sd1, sd2, sd12, poincarea = pyhrv.nonlinear.poincare(flag_int, show=False, plot=False, legend=False)[:]\n except:\n sd1, sd2, sd12, poincarea = None, None, None, None\n if dict['sd1']['use'] == 'yes':\n args += [sd1]\n names += ['sd1']\n\n if dict['sd2']['use'] == 'yes':\n args += [sd2]\n names += ['sd2']\n\n if dict['sd12']['use'] == 'yes':\n args += [sd12]\n names += ['sd12']\n\n if dict['poincarea']['use'] == 'yes':\n args += [poincarea]\n names += ['poincarea']\n\n if dict['sample_entropy']['use'] == 'yes':\n try:\n sample_entropy = pyhrv.nonlinear.sample_entropy(flag_int)[0]\n except:\n sample_entropy = None\n args += [sample_entropy]\n names += ['sample_entropy']\n\n try:\n _, dfa_alpha1, dfa_alpha2 = pyhrv.nonlinear.dfa(flag_int, show=False, legend=False)[:]\n except:\n dfa_alpha1, dfa_alpha2 = None, None\n\n if dict['dfa_alpha1']['use'] == 'yes':\n args += [dfa_alpha1]\n names += ['dfa_alpha1']\n\n if dict['dfa_alpha2']['use'] == 'yes':\n args += [dfa_alpha2]\n names += ['dfa_alpha2']\n\n # Geometrical features\n try:\n tinn = pyhrv.time_domain.tinn(nni=flag_int, show=False, legend=False, plot=False)[0]\n tinn_n = tinn['tinn_n']\n tinn_m = tinn['tinn_m']\n tinn = tinn['tinn']\n except:\n tinn_n, tinn_m, tinn = None, None, None\n\n if dict['tinn']['use'] == 'yes':\n args += [tinn]\n names += ['tinn']\n if dict['tinn_n']['use'] == 'yes':\n args += [tinn_n]\n names += ['tinn_n']\n if dict['tinn_m']['use'] == 'yes':\n args += [tinn_m]\n names += ['tinn_m']\n\n if dict['triangular_index']['use'] == 'yes':\n try:\n triangular_index = pyhrv.time_domain.triangular_index(flag_int, show=False, plot=False, legend=False)[0]\n except:\n triangular_index = None\n args += [triangular_index]\n names += ['triangular_index']\n\n return utils.ReturnTuple(tuple(args), tuple(names))\n ", "sub_path": "biosppy/features/nonlinear_geo_features.py", "file_name": "nonlinear_geo_features.py", "file_ext": "py", "file_size_in_byte": 3984, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "json.load", "line_number": 57, "usage_type": "call"}, {"api_name": "pyhrv.tools.nn_intervals", "line_number": 61, "usage_type": "call"}, {"api_name": "pyhrv.tools", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pyhrv.nonlinear.poincare", "line_number": 67, "usage_type": "call"}, {"api_name": "pyhrv.nonlinear", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pyhrv.nonlinear.sample_entropy", "line_number": 88, "usage_type": "call"}, {"api_name": "pyhrv.nonlinear", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pyhrv.nonlinear.dfa", "line_number": 95, "usage_type": "call"}, {"api_name": "pyhrv.nonlinear", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pyhrv.time_domain.tinn", "line_number": 109, "usage_type": "call"}, {"api_name": "pyhrv.time_domain", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pyhrv.time_domain.triangular_index", "line_number": 128, "usage_type": "call"}, 
{"api_name": "pyhrv.time_domain", "line_number": 128, "usage_type": "attribute"}]} +{"seq_id": "66240747", "text": "#!/usr/bin/python\n \n# python libraries\nimport sys, datetime, json\nfrom pytz import timezone\n \n#google libraries\nfrom oauth2client.client import AccessTokenRefreshError\n \n# app files\nimport utils\n \ndef main(argv):\n get_stats()\n \ndef get_stats() :\n \n # Authenticate and construct service.\n service = sample_utils.initialize_service()\n \n #client_id - get this from your adsense login\n ad_client_id = 'pub-1711976718738240'\n \n #adsense timezone is usa pacific http://support.google.com/adsense/bin/answer.py?hl=en&answer=59143\n now_pacific = datetime.datetime.now(timezone('US/Pacific'))\n today_pacific = now_pacific.strftime('%Y-%m-%d')\n \n yesterday_pacific = now_pacific - datetime.timedelta(1)\n yesterday_pacific = yesterday_pacific.strftime('%Y-%m-%d')\n \n month_first = now_pacific.strftime('%Y-%m-01')\n \n # print the dates of today and yesterday, these are used to define timespans for the report\n print (today_pacific)\n print (yesterday_pacific)\n \n #print today_pacific\n try:\n all_data = {}\n \n sets = {\n 'today' : [today_pacific , today_pacific] ,\n 'yesterday' : [yesterday_pacific , yesterday_pacific] ,\n 'this_month' : [month_first , today_pacific]\n }\n \n for k,v in sets.items() :\n # Retrieve report. result is a json object\n result = service.reports().generate(\n startDate = v[0] , \n endDate = v[1] ,\n filter=['AD_CLIENT_ID==' + ad_client_id],\n metric=['PAGE_VIEWS', 'CLICKS', 'PAGE_VIEWS_CTR', 'COST_PER_CLICK', 'AD_REQUESTS_RPM', 'EARNINGS'],\n #dimension=['DATE'],\n #sort=['+DATE']\n ).execute()\n \n #dumping json object - you may want to dump, to see the structure and use next\n #print json.dumps(result, sort_keys=True, indent=4)\n \n # Display headers\n '''for header in result['headers']:\n print '%15s' % header['name'],\n print'''\n data = {}\n # Display results\n if 'rows' in result :\n row = result['rows'][0]\n \n data['page_view'] = row[0]\n data['clicks'] = row[1]\n data['ctr'] = row[2]\n data['cpc'] = row[3]\n data['rpm'] = row[4]\n data['earnings'] = row[5]\n \n '''for row in result['rows']:\n for column in row:\n print '%15s' % column ,\n print'''\n \n all_data[k] = data\n \n print (str(all_data))\n \n #authorization problem\n except AccessTokenRefreshError:\n print ('The credentials have been revoked or expired, please re-run the application to re-authorize')\n \n# main function\nif __name__ == '__main__':\n main(sys.argv) ", "sub_path": "API_scripts/adsense/adsense_make_scripts/report_draft.py", "file_name": "report_draft.py", "file_ext": "py", "file_size_in_byte": 3003, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "datetime.datetime.now", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 28, "usage_type": "call"}, {"api_name": "oauth2client.client.AccessTokenRefreshError", "line_number": 87, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 92, "usage_type": "attribute"}]} +{"seq_id": "123396043", "text": "\"\"\"\nParses historical versions of the NoroSTAT data-table and updates the\nappropriate databases. Currently uses snapshots from the WayBack Machine\n(archive.org). 
A more comprehensive archival service may be mementoweb.org,\nwhich appears to pull from many services that implement the Memento protocol,\nincluding archive.org. Manually downloaded snapshots could be recorded via this\nscript as well.\n\"\"\"\n\n# standard library\nimport re\nimport os\nimport time\nimport collections\n\n# first party\nfrom . import norostat_sql\nfrom . import norostat_raw\n\n\n\ndef main():\n norostat_sql.ensure_tables_exist()\n snapshot_dir = os.path.expanduser(\"~/norostat_history/wayback/websites/www.cdc.gov/norovirus/reporting/norostat/data-table.html/\")\n snapshot_version_counter = collections.Counter()\n for subdir in os.listdir(snapshot_dir):\n if re.match(r'[0-9]+', subdir) is not None:\n # appears to be snapshot dir\n snapshot_version_counter[subdir] = 0 # register that loop found this snapshot directory\n for norostat_capitalization in [\"norostat\",\"noroSTAT\"]:\n time.sleep(0.002) # ensure parse times are unique, assuming OS can accurately sleep and measure to ms precision\n path = os.path.join(snapshot_dir,subdir,\"norovirus\",\"reporting\",norostat_capitalization,\"data-table.html\")\n if os.path.isfile(path):\n print(\"Processing file \", path)\n with open(path, 'r') as datatable_file:\n content = datatable_file.read()\n wide_raw = norostat_raw.parse_content_to_wide_raw(content)\n long_raw = norostat_raw.melt_wide_raw_to_long_raw(wide_raw)\n norostat_sql.record_long_raw(long_raw)\n snapshot_version_counter[subdir] += 1\n print('Successfully uploaded the following snapshots, with the count indicating the number of data-table versions found inside each snapshot (expected to be 1, or maybe 2 if there was a change in capitalization; 0 indicates the NoroSTAT page was not found within a snapshot directory); just \"Counter()\" indicates no snapshot directories were found:', snapshot_version_counter)\n norostat_sql.update_point()\n\nif __name__ == '__main__':\n main()\n", "sub_path": "src/acquisition/norostat/norostat_add_history.py", "file_name": "norostat_add_history.py", "file_ext": "py", "file_size_in_byte": 2144, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.expanduser", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 25, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 26, "usage_type": "call"}, {"api_name": "re.match", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}]} +{"seq_id": "395757239", "text": "#!/usr/bin/python\nfrom ConfigParser import SafeConfigParser\nimport codecs,urllib2,json,sys\ntry:\n #Validating if the libraries are present Else it recommends to install\n import requests, validators\nexcept ImportError as error:\n print(error.__class__.__name__ + \": \" + error.message + \" Please Install \" + error.message.split()[3])\n sys.exit()\ndef api_call(api_url,guser):\n try:\n #Gets data from the github user account\n response = requests.get(url = api_url+guser+'/repos').json()\n if len(response) == 0:\n print ('No Public Repositories for the user', guser)\n else:\n for i in 
range(len(response)):\n print (response[i]['full_name'].split(guser,1)[1][1:])\n except :\n print('Exception occurred retreving data')\n#method to validate if the user is valid\ndef validate_user(api_url,guser):\n try:\n return urllib2.urlopen(api_url+guser).code\n except urllib2.HTTPError as err:\n return err.code\n#Method to validate if its a valid url\ndef validate_url(api_url):\n return validators.url(api_url)\n\ndef main():\n parser = SafeConfigParser()\n #Reading Api and url from the credentials file\n with codecs.open('credentials.ini', 'r') as cred_file:\n parser.readfp(cred_file)\n api_url = parser.get('Achievement_First', 'api_url')\n guser = parser.get('Achievement_First', 'guser')\n url_val =validate_url(api_url)\n user_val=validate_user(api_url,guser)\n if url_val != True:\n print('API URL is not valid')\n sys.exit()\n elif user_val != 200:\n print('Unable to validate the user because of',validate_user(api_url,guser))\n sys.exit()\n elif (validate_url(api_url)) and (validate_user(api_url,guser) == 200):\n api_call(api_url,guser)\n else:\n print('Please verify GitHub User and API')\n sys.exit()\n\nif __name__ == '__main__':\n main()\n", "sub_path": "api_call.py", "file_name": "api_call.py", "file_ext": "py", "file_size_in_byte": 1917, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.exit", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 24, "usage_type": "call"}, {"api_name": "urllib2.HTTPError", "line_number": 25, "usage_type": "attribute"}, {"api_name": "validators.url", "line_number": 29, "usage_type": "call"}, {"api_name": "ConfigParser.SafeConfigParser", "line_number": 32, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "251569797", "text": "import xbmc\nimport xbmcgui\nimport xbmcaddon\nimport json\nimport time\nimport sys\nimport colorsys\nimport os\nimport datetime\nimport math\n#import netifaces\nimport urllib2\nfrom threading import Thread\nimport socket\nimport collections\n\n\n__addon__ = xbmcaddon.Addon()\n__addonS__ = sys.modules[ \"__main__\" ].__addon__\n\n__cwd__ = __addon__.getAddonInfo('path')\n__resource__ = xbmc.translatePath( os.path.join( __cwd__, 'resources', 'lib' ) )\n\n\nsys.path.append (__resource__)\n\nfrom settings import *\nfrom tools import *\n\ntry:\n import requests\nexcept ImportError:\n xbmc.log(\"ERROR: Could not locate required library requests\")\n notify(\"XBMC Halu\", \"ERROR: Could not import Python requests\")\n\nxbmc.log(\"XBMC Halu service started, version: %s\" % get_version())\n\n\n\n\n\nportDiscovery = 1900 # Udp discovery port\nUDP_PORT = 2345\nUDP_IP = '10.255.255.255'\nlocalPort = 50123\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n\n\nplayer = None\ncapture = xbmc.RenderCapture()\n\nfmt = capture.getImageFormat()\n# BGRA or RGBA\n# xbmc.log(\"Hue Capture Image format: %s\" % fmt)\nfmtRGBA = fmt == 'RGBA'\n\n#rgbw = [[0 for x in xrange(3)] for x in xrange(4)]\nrgbw = [[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nmaximo = 0\n\nfactorR = float( 255.0 / 200.0 )\nfactorV = float( 255.0 / 81.0 )\nfactorA = float( 255.0 / 237.0 
)\nfactorB = float( 255.0 / 36.0 )\n\n\n\n\n\nclass MyMonitor( xbmc.Monitor ):\n\tdef __init__( self, *args, **kwargs ):\n\t\txbmc.Monitor.__init__( self )\n\n\tdef onSettingsChanged( self ):\n\t\tlast = datetime.datetime.now()\n\t\th.updateDB()\nmonitor = MyMonitor()\n\nlogger = Logger()\n\ntry:\n\timport requests\nexcept ImportError:\n\tlogger.log(\"ERROR: Could not locate required library requests\")\n\tnotify(\"Kodi Halu\", \"ERROR: Could not import Python requests\")\n\n\n\nwaitTime = time.time()\n\n\n\nlogger.log(\"Kodi Halu, version: %s\" % get_version())\n\n\n\n\n\n\ndef rpiColor(r,g,b):\n\n\tbl = min(r, g, b)\t#white component\n\t\n\trc = r - bl \t\t\t#pure color component\n\tgc = g - bl\n\tbc = b - bl\n\n\tbl = bl * factorB\n\tr = rc * factorR\n\tg = gc * factorV\n\tb = bc * factorA\n\n\treturn int(r + bl), int(g + bl), int(b + bl) \n\n\n\n\n\n\nclass MyPlayer(xbmc.Player):\n\tduration = 0\n\n\n\tdef __init__(self):\n\t\txbmc.Player.__init__(self)\n\t\n\tdef onPlayBackStarted(self):\n\t\tif self.isPlayingVideo():\n\t\t\tlogger.log(\"Video starts\")\n\t\t\tqqthreadCapture.playingVideo = True\n\t\t\tqqthreadCapture.playingAudio = False\n\t\tif self.isPlayingAudio():\n\t\t\tlogger.log(\"Audio starts\")\n\t\t\tqqthreadCapture.playingVideo = False\n\t\t\tqqthreadCapture.playingAudio = True\n\t\t\t#logger.log(self.getAvailableAudioStreams())\n\n\tdef onPlayBackPaused(self):\n\n\t\tif self.isPlayingVideo():\n\t\t\tlogger.log(\"Video paused\")\n\t\tif self.isPlayingAudio():\n\t\t\tlogger.log(\"Audio paused\")\n\t\tqqthreadCapture.playingVideo = False\n\t\tqqthreadCapture.playingAudio = False\n\t\th.qq_postSpaceColor()\n\n\tdef onPlayBackResumed(self):\n\t\tif self.isPlayingVideo():\n\t\t\tlogger.log(\"Video reumed\")\n\t\t\tqqthreadCapture.playingVideo = True\n\t\t\tqqthreadCapture.playingAudio = False\n\t\tif self.isPlayingAudio():\n\t\t\tlogger.log(\"Audio resumed\")\n\t\t\tqqthreadCapture.playingVideo = False\n\t\t\tqqthreadCapture.playingAudio = True\n\n\tdef onPlayBackStopped(self):\n\t\tif self.isPlayingVideo():\n\t\t\tlogger.log(\"Video stoped\")\n\t\tif self.isPlayingAudio():\n\t\t\tlogger.log(\"Audio stoped\")\n\t\tqqthreadCapture.playingVideo = False\n\t\tqqthreadCapture.playingAudio = False\n\t\th.qq_postSpaceColor()\n\n\tdef onPlayBackEnded(self):\n\t\tif self.isPlayingVideo():\n\t\t\tlogger.log(\"Video ended\")\n\t\tif self.isPlayingAudio():\n\t\t\tlogger.log(\"Audio ended\")\n\t\tqqthreadCapture.playingVideo = False\n\t\tqqthreadCapture.playingAudio = False\n\t\th.qq_postSpaceColor()\n\n\ndef getAvgColor():\n\tglobal maximo\n\n\tr, g, b, w = 0, 0, 0, 0\n\n\tfor x in range(3):\n\t\tfor y in range(4):\n\t\t\trgbw[x][y] = 0\n\n\n\tif qqthreadCapture.playingVideo and (h.connected == True):\n\t\tcapture_width = 32 #100\n\t\tcapture_height = int(capture_width / capture.getAspectRatio())\n\n\t\twFifth = int(capture_width / 5)\n\t\thFifth = int(capture_height / 5)\n\t\thThird = int(capture_height / 3)\n\n\t\tcapture.capture(capture_width, capture_height, xbmc.CAPTURE_FLAG_CONTINUOUS)\n\t\t#capture.capture(capture_width, capture_height)\n\t\t#capture.capture()\n\t\t\n\t\tespera = time.time()\n\t\twhile capture.getCaptureState() != xbmc.CAPTURE_STATE_DONE:\n\t\t\tif not(qqthreadCapture.playingVideo):\n\t\t\t\tlogger.log(\"Not playing > do not capture!\")\n\t\t\t\treturn\n\t\t\t#time.sleep(0.001)\n\t\t\tif (time.time() - espera) > 8:\n\t\t\t\tlogger.log(\"estado:\" + str(capture.getCaptureState()) + \"done:\" + str(xbmc.CAPTURE_STATE_DONE) + \"working:\" + 
str(xbmc.CAPTURE_STATE_WORKING) + \"Failed:\" + str(xbmc.CAPTURE_STATE_FAILED))\n\t\t\t\tcapture.capture(capture_width, capture_height, xbmc.CAPTURE_FLAG_CONTINUOUS) \n\t\t\t\tlogger.log(\"long waiting for capture done, asking again\")\n\t\t\t\tespera = time.time()\n\t\tespera = time.time() - espera\n\n\t\t#logger.log(\"TIEMPO Espera: \" + str(espera))\n\n\t\tpix = time.time()\n\t\tif qqthreadCapture.playingVideo: \n\t\t\tpixels = capture.getImage()\n\t\telse:\n\t\t\treturn\n\t\tpix = time.time() - pix\n\n\t\tp = 0\n\t\tsize = int(len(pixels)/4)\n\t\tz1 = 0\t#pixel counter for left area\n\t\tz2 = 0\t# \t\t\t\t\"\t\t\t\t\tcenter up\n\t\tz3 = 0\t# \t\t\t\t\"\t\t\t\t\tright\n\n\t\tif h.settings.mode == 0:\t#one zone mode, full screen\n\t\t\t\n\t\t\tfor i in range(size):\n\t\t\t\tif fmtRGBA:\n\t\t\t\t\tr += pixels[p]\n\t\t\t\t\tg += pixels[p + 1]\n\t\t\t\t\tb += pixels[p + 2]\n\t\t\t\telse: #probably BGRA\n\t\t\t\t\tb += pixels[p]\n\t\t\t\t\tg += pixels[p + 1]\n\t\t\t\t\tr += pixels[p + 2]\n\t\t\t\tp += 4\n\t\t\trgbw[0][0] = r / size\n\t\t\trgbw[0][1] = g / size\n\t\t\trgbw[0][2] = b / size\n\t\t\tif h.settings.rpi:\n\t\t\t\trgbw[0][0], rgbw[0][1], rgbw[0][2] = rpiColor(rgbw[0][0], rgbw[0][1], rgbw[0][2])\n\n\t\t\trgbw[0][3] = min(rgbw[0][0], rgbw[0][1], rgbw[0][2]) / 4\n\t\t\t#logger.log(\"RGB[\"+ str(r) + ',' + str(g) + ',' + str(b) + '], rgbw[0]='+ str(rgbw[0]) + 'size: ' + str(size) )\n\n\t\telse:\n\t\t\t#logger.log(\"capture format: \" + fmt)\n\t\t\tfor i in range(size):\n\t\t\t\tif fmtRGBA:\n\t\t\t\t\tr = pixels[p]\n\t\t\t\t\tg = pixels[p + 1]\n\t\t\t\t\tb = pixels[p + 2]\n\t\t\t\telse: #probably BGRA\n\t\t\t\t\tb = pixels[p]\n\t\t\t\t\tg = pixels[p + 1]\n\t\t\t\t\tr = pixels[p + 2]\n\t\t\t\t#logger.log('p[' + str(pixels[p]) + ',' + str(pixels[p+1]) + ',' + str(pixels[p+2]) + ',' + str(pixels[p+3]) +']')\n\t\t\t\tp += 4\n\n\t\t\t\tcx = i % capture_width\n\t\t\t\tcy = i / capture_width\n\t\t\t\t\n\t\t\t\tif (cx < wFifth):\n\t\t\t\t\tif (cx > cy):\n\t\t\t\t\t\tz2 += 1\n\t\t\t\t\t\trgbw[1][0] += r\n\t\t\t\t\t\trgbw[1][1] += g \t#center up\n\t\t\t\t\t\trgbw[1][2] += b\n\t\t\t\t\telse:\n\t\t\t\t\t\tz1 += 1\n\t\t\t\t\t\trgbw[0][0] += r\n\t\t\t\t\t\trgbw[0][1] += g \t#left\n\t\t\t\t\t\trgbw[0][2] += b\n\t\t\t\telif (cx < (capture_width - wFifth)):\n\t\t\t\t\tif (cy < hThird):\n\t\t\t\t\t\tz2 += 1\n\t\t\t\t\t\trgbw[1][0] += r\n\t\t\t\t\t\trgbw[1][1] += g \t#center up\n\t\t\t\t\t\trgbw[1][2] += b\n\t\t\t\telif (cy < hFifth):\n\t\t\t\t\tif (cx - (capture_width - wFifth)) > cy :\n\t\t\t\t\t\tz2 += 1\n\t\t\t\t\t\trgbw[1][0] += r\n\t\t\t\t\t\trgbw[1][1] += g \t#center up\n\t\t\t\t\t\trgbw[1][2] += b\n\t\t\t\telse:\n\t\t\t\t\tz3 += 1\n\t\t\t\t\trgbw[2][0] += r\n\t\t\t\t\trgbw[2][1] += g \t#right\n\t\t\t\t\trgbw[2][2] += b\n\t\t\t\n\t\t\t#logger.log(\"rgbw=\"+ str(rgbw))\t\t\n\t\t\tif h.settings.rpi:\n\t\t\t\trgbw[0][0], rgbw[0][1], rgbw[0][2] = rpiColor(rgbw[0][0], rgbw[0][1], rgbw[0][2])\n\t\t\t\trgbw[1][0], rgbw[1][1], rgbw[1][2] = rpiColor(rgbw[1][0], rgbw[1][1], rgbw[1][2])\n\t\t\t\trgbw[2][0], rgbw[2][1], rgbw[2][2] = rpiColor(rgbw[2][0], rgbw[2][1], rgbw[2][2])\n\t\t\t#logger.log(\"RPIrgbw=\"+ str(rgbw))\n\t\t\trgbw[0][0] = rgbw[0][0] / z1\n\t\t\trgbw[0][1] = rgbw[0][1] / z1\n\t\t\trgbw[0][2] = rgbw[0][2] / z1\n\n\n\t\t\trgbw[0][3] = min(rgbw[0][0], rgbw[0][1], rgbw[0][2]) / 4\n\n\t\t\trgbw[1][0] = rgbw[1][0] / z2\n\t\t\trgbw[1][1] = rgbw[1][1] / z2\n\t\t\trgbw[1][2] = rgbw[1][2] / z2\n\t\t\trgbw[1][3] = min(rgbw[1][0], rgbw[1][1], rgbw[1][2]) / 4\n\n\t\t\trgbw[2][0] = rgbw[2][0] / 
z3\n\t\t\trgbw[2][1] = rgbw[2][1] / z3\n\t\t\trgbw[2][2] = rgbw[2][2] / z3\n\t\t\trgbw[2][3] = min(rgbw[2][0], rgbw[2][1], rgbw[2][2]) / 4\n\n\t\t\t#logger.log(\"End values: \" + str(rgbw))\n\n\t\t#logger.log(rgbw[0])\n\n\treturn \n\n\n\nclass Halu:\n\n\tconnected = False\n\tdatabase = {}\n\tlamp_db = []\n\tleft = []\n\tright = []\n\tcenterUp = []\n\t#timeFromPlay = time.time()\n\t\n\n\tdef __init__(self, settings):\n\t\t#self.discovery()\n\n\t\tself.settings = settings\n\t\tself.connected = False\n\t\tself.system = None\n\n\t\tself.updateDB()\n\t\tif self.connected:\n\t\t\tself.qq_postSpaceColor()\n\t\t\t\n\t\t\n\n\n\tdef updateDB(self):\n\t\t\n\t\tdatabase = {}\n\t\tself.settings.readxml()\n\t\tif self.settings.enable:\n\t\t\tlogger.log(\"settings.xml loaded,\\n %s\" % self.settings)\n\t\t\tif self.getDatabase():\n\t\t\t\tself.connected = True\n\t\t\t\tself.loadLamps()\t\t\t\n\t\t\telse:\n\t\t\t\tself.connected = False\n\t\telse:\n\t\t\tlogger.log(\" Halu disbabled from settings!\")\n\n\n\tdef loadLamps(self):\n\t\t\n\t\tself.left[:] = []\n\t\tself.right[:] = []\n\t\tself.centerUp[:] = []\n\n\t\tlogger.log(\"num lamps: %s\" % str(len(self.lamp_db)))\n\t\tfor i in range(len(self.lamp_db)):\n\t\t\tif (self.database[\"data\"][\"lamp_db\"][i][\"available\"] == True) and (self.database[\"data\"][\"lamp_db\"][i][\"space_id\"] == self.settings.playRoom):#space_id for reproductor\n\t\t\t\tif self.database[\"data\"][\"lamp_db\"][i][\"position\"][\"x\"] < 0:\n\t\t\t\t\tself.left.append(self.database[\"data\"][\"lamp_db\"][i]['id'])\n\t\t\t\telif self.database[\"data\"][\"lamp_db\"][i][\"position\"][\"x\"] >0:\n\t\t\t\t\tself.right.append(self.database[\"data\"][\"lamp_db\"][i]['id'])\n\t\t\t\telif (self.database[\"data\"][\"lamp_db\"][i][\"position\"][\"x\"] == 0) and (self.database[\"data\"][\"lamp_db\"][i][\"position\"][\"z\"] >0 ):\n\t\t\t\t\tself.centerUp.append(self.database[\"data\"][\"lamp_db\"][i]['id'])\n\n\t\tlogger.log(\"Left Lamps ID's: %s\" % str(self.left))\n\t\tlogger.log(\"right Lamps ID's: %s\" % str(self.right))\n\t\tlogger.log(\"centerUp Lamps ID's: %s\" % str(self.centerUp))\t\t\n\n\n\n\tdef discovery(self):\n\t\t#Thanks Nick Riviera!\n\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\ts.setblocking(0)\n\t\ts.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n\t\ts.bind(('', localPort))\n\n\t\tfor interfaz in netifaces.interfaces() :\n\t\t\t#logger.log(interfaz)\n\t\t\taddrs = netifaces.ifaddresses(interfaz)\n\t\t\ttry :\n\t\t\t\tdata =\taddrs[netifaces.AF_INET]\n\t\t\t\t#logger.log(data)\n\t\t\texcept:\n\t\t\t\t#logger.log(\"no valid IPv4.\")\n\t\t\t\tcontinue\n\n\t\t\tfor i, d in enumerate([e for e in data if \"broadcast\" in e.keys()]):\n\n\t\t\t\t#logger.log(\"intentando con:\" + d[\"broadcast\"])\n\t\t\t\ttry:\n\t\t\t\t\ts.sendto(\"qq_discovery\", (d[\"broadcast\"], portDiscovery))\n\t\t\t\texcept Exception as error:\n\t\t\t\t\t#logger.log(\"error: %s\" % error)\n\t\t\t\t\tbreak\n\t\t\t\tstart = time.time()\n\t\t\t\twhile time.time() - start < 2 :\n\t\t\t\t\ttry:\n\t\t\t\t\t\tllega, address = s.recvfrom(4096)\n\t\t\t\t\texcept:\n\t\t\t\t\t\ts.sendto(\"qq_discovery\", (d[\"broadcast\"], portDiscovery))\n\t\t\t\t\t\ttime.sleep(0.05)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\ttry:\n\t\t\t\t\t\tjllega = json.loads(llega)\n\t\t\t\t\t\tipInt = jllega[\"data\"][\"access_point_db\"][\"ip\"]\n\t\t\t\t\t\tipExt = jllega[\"data\"][\"connection_db\"][\"ip\"]\n\n\t\t\t\t\texcept Exception as error:\n\t\t\t\t\t\t#logger.log(\"No es Halu!! 
%s\" % error)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tlogger.log(\"Halu encontrada!!\")\n\t\t\t\t\tself.connected = True\n\t\t\t\t\tself.settings.haluIp = address[0]\n\t\t\t\t\treturn self.connected\n\t\treturn 'None'\n\n\n\n\tdef getDatabase(self):\n\t\turlLamp = 'http://' + str(self.settings.haluIp) + ':2015/master/database'\n\t\ttry:\n\t\t\treq = requests.get(urlLamp)\n\t\texcept:\n\t\t\tlogger.log(\"Halu not found\")\n\n\t\t\treturn False\n\n\t\t#logger.log(req.status_code)\n\t\t#logger.log(req.headers)\n\t\t#logger.log(req.content)\n\t\ttry:\n\t\t\tself.database = json.loads(req.content)\n\t\texcept Exception as error:\n\t\t\tlogger.log(\"Error getting the Json database: %s\" % str(error))\n\t\t\treturn False\n\t\tself.lamp_db[:] = []\n\t\tself.lamp_db = self.database[\"data\"][\"lamp_db\"]\n\t\t#logger.log(lamp_db)\n\t\t#logger.log(\"constants.SYSTEM: \" + constants.SYSTEM)\n\t\tlogger.log(\"sys.platform: \" + sys.platform)\n\n\n\t\treturn True\n\n\n\n\tdef qq_postEffect(self):\n\t\turlEffect = 'http://' + str(self.settings.haluIp) + ':2015/master/effect'\n\n\t\tdata = {'duration': 1, 'priority': 1, 'steps': []}\n\n\t\tj = float(self.settings.effectsIntensity)/100\n\n\t\tif self.settings.mode ==0:\n\t\t\tdata[\"steps\"].append({'color' : {'components': {'r': rgbw[0][0], 'g': rgbw[0][1], 'b': rgbw[0][2], 'w': rgbw[0][3]}, 'fade' : 0.07, 'intensity' : j},'target': {'id': self.settings.playRoom, 'type': 'space'}, 'start_time': 0})\n\t\telse:\n\n\t\t\tfor i in range(len(h.left)):\n\t\t\t\tdata[\"steps\"].append({'color' : {'components': {'r': rgbw[0][0], 'g': rgbw[0][1], 'b': rgbw[0][2], 'w': rgbw[0][3]}, 'fade' : 0.07, 'intensity' : j},'target': {'id': h.left[i], 'type': 'lamp'}, 'start_time': 0})\n\t\t\tfor i in range(len(h.centerUp)):\n\t\t\t\tdata[\"steps\"].append({'color' : {'components': {'r': rgbw[1][0], 'g': rgbw[1][1], 'b': rgbw[1][2], 'w': rgbw[1][3]}, 'fade' : 0.07, 'intensity' : j},'target': {'id': h.centerUp[i], 'type': 'lamp'}, 'start_time': 0})\n\t\t\tfor i in range(len(h.right)):\n\t\t\t\tdata[\"steps\"].append({'color' : {'components': {'r': rgbw[2][0], 'g': rgbw[2][1], 'b': rgbw[2][2], 'w': rgbw[2][3]}, 'fade' : 0.07, 'intensity' : j},'target': {'id': h.right[i], 'type': 'lamp'}, 'start_time': 0})\n\n\t\t#logger.log(urlEffect)\n\t\t#logger.log(data)\n\t\treq = urllib2.Request(urlEffect)\n\t\treq.add_header('Content-Type', 'application/json')\n\t\t#response = urllib2.urlopen(req, json.dumps(data))\n\t\t#logger.log(urlEffect)\n\t\t#logger.log(data)\n\t\t#logger.log(response)\n\t\tif qqthreadCapture.playingVideo: \n\t\t\ttry:\n\t\t\t\tresponse = urllib2.urlopen(req, json.dumps(data))\t\n\t\t\texcept Exception as error:\n\t\t\t\tlogger.log(\"fail on POST: %s\" % str(error))\n\t\t\t\tlogger.log(\"url: %s\" % urlEffect)\n\t\t\t\tlogger.log(\"data: %s\" % data)\n\t\t\n\t\treturn\n\n\n\n\tdef qq_sendUDP(self):\n\t\t#data = collections.OrderedDict((( \"method\" , \"POST\"), (\"target\" , \"lamp\"), (\"action\", \"effect\"), (\"data\", { 'steps': [] })))\n\n\t\tdata = {\"method\": \"POST\", \"target\" : \"lamp\", \"action\": \"effect\", 'data':{'duration': 1, 'priority': 1, 'steps': []}}\n\n\t\tj = float(self.settings.effectsIntensity)/100\n\n\t\tif self.settings.mode ==0:\n\t\t\tdata[\"data\"][\"steps\"].append({'color' : {'components': {'r': rgbw[0][0], 'g': rgbw[0][1], 'b': rgbw[0][2], 'w': rgbw[0][3]}, 'fade' : 0.07, 'intensity' : j},'target': {'id': self.settings.playRoom, 'type': 'spaceUdp'}, 'start_time': 0})\n\t\telse:\n\n\t\t\tfor i in 
range(len(h.left)):\n\t\t\t\tdata[\"data\"][\"steps\"].append({'color' : {'components': {'r': rgbw[0][0], 'g': rgbw[0][1], 'b': rgbw[0][2], 'w': rgbw[0][3]}, 'fade' : 0.07, 'intensity' : j},'target': {'id': h.left[i], 'type': 'lampUdp'}, 'start_time': 0})\n\t\t\tfor i in range(len(h.centerUp)):\n\t\t\t\tdata[\"data\"][\"steps\"].append({'color' : {'components': {'r': rgbw[1][0], 'g': rgbw[1][1], 'b': rgbw[1][2], 'w': rgbw[1][3]}, 'fade' : 0.07, 'intensity' : j},'target': {'id': h.centerUp[i], 'type': 'lampUdp'}, 'start_time': 0})\n\t\t\tfor i in range(len(h.right)):\n\t\t\t\tdata[\"data\"][\"steps\"].append({'color' : {'components': {'r': rgbw[2][0], 'g': rgbw[2][1], 'b': rgbw[2][2], 'w': rgbw[2][3]}, 'fade' : 0.07, 'intensity' : j},'target': {'id': h.right[i], 'type': 'lampUdp'}, 'start_time': 0})\n\t\tif qqthreadCapture.playingVideo: \n\t\t\ttry: \n\t\t\t\tsock.sendto(json.dumps(data), (UDP_IP, UDP_PORT))\n\t\t\texcept Exception as error:\n\t\t\t\tlogger.log(\"fail sending udp effect, reason: %s\" % str(error))\n\n\n\tdef qq_postSpaceColor(self):\n\n\t\tif self.settings.enable:\n\t\t\n\t\t\tlogger.log(\"sending space commad for idle state.\")\n\t\t\turlEffect = 'http://' + str(self.settings.haluIp) + ':2015/space/' + str(self.settings.playRoom) + '/color'\n\t\t\t\n\n\t\t\tdata = {'components': {'r': 255, 'g': 255, 'b': 150, 'w': 255}, 'fade' : 1, 'intensity' : float(self.settings.idleLight)/100}\n\n\t\t\treq = urllib2.Request(urlEffect)\n\t\t\treq.add_header('Content-Type', 'application/json')\n\n\t\t\ttry:\n\t\t\t\tresponse = urllib2.urlopen(req, json.dumps(data))\t\n\t\t\texcept Exception as error:\n\t\t\t\tlogger.log(\"fail on POST: %s\" % str(error))\n\t\t\t\tlogger.log(\"url: %s\" % urlEffect)\n\t\t\t\tlogger.log(\"data: %s\" % data)\n\t\telse:\n\t\t\tlogger.log(\"No space order sent because Halu is disbabled.\")\n\t\treturn\n\n\n\n\n\n\nclass loop(Thread):\n\n\tdef __init__(self):\n\t\t''' Constructor. 
'''\n\t\tThread.__init__(self)\n\t\tself.playingVideo = False\n\t\tself.playingAudio = False\n\n\t\tself.exit = False\n\n\tdef run(self):\n\t\tglobal waitTime\n\t\tglobal img\n\t\tmethod = 'None'\n\n\t\twhile not(self.exit) :\n\t\t\twaitTime = time.time() - waitTime\n\t\t\tif (h.connected == True) and h.settings.enable :\n\t\t\t\tcolorTime = time.time()\n\t\t\t\tgetAvgColor()\n\t\t\t\tcolorTime = time.time() -colorTime\n\n\t\t\t\tsendTime = time.time()\n\t\t\t\tif self.playingVideo:\n\t\t\t\t\tif h.settings.protocol == 0 :\t#0 = TCP, 1 = UDP\n\t\t\t\t\t\th.qq_postEffect()\n\t\t\t\t\t\tmethod ='tcp'\n\t\t\t\t\telse :\t\n\t\t\t\t\t\th.qq_sendUDP()\n\t\t\t\t\t\tmethod = 'udp'\n\t\t\t\t\tsendTime = time.time() - sendTime\n\t\t\t\t\tseconds = waitTime + colorTime + sendTime \n\t\t\t\t\tlogger.log(method + \"FPS:{0}\".format(1/seconds) + \" waitTime:\" + str(waitTime) + \" ColorTime:\" + str(colorTime) + \" PostTime:\" + str(sendTime))\n\t\t\t\t\n\t\t\telif self.playingAudio and (h.connected == True):\n\t\t\t\tlogger.log(\"audio playing\")\n\t\t\t\txbmc.sleep(2000)\n\t\t\telse:\n\t\t\t\txbmc.sleep(2000)\n\t\t\twaitTime = time.time()\n\t\t\txbmc.sleep(h.settings.delay)\n\n\n\n\n\nlogger.log(\"Halu init!!\")\n\nsett = settings()\n\n\nh = Halu(sett)\nqqthreadCapture = loop()\n\n\nif ( __name__ == \"__main__\" ):\n\tqqthreadCapture.start()\n\tlast = datetime.datetime.now()\n\twhile not xbmc.abortRequested:\n\n\t\tif player == None:\n\t\t\t\n\t\t\tplayer = MyPlayer()\n\t\txbmc.sleep(100)\n\n\tlogger.log(\"exiting capture thread\")\n\tqqthreadCapture.exit = True\n\txbmc.sleep(200)\n\tqqthreadCapture.join()\n", "sub_path": "default.py", "file_name": "default.py", "file_ext": "py", "file_size_in_byte": 16945, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "xbmcaddon.Addon", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 19, "usage_type": "attribute"}, {"api_name": "xbmc.translatePath", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "xbmc.log", "line_number": 33, "usage_type": "call"}, {"api_name": "xbmc.log", "line_number": 36, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 47, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 47, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 47, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 48, "usage_type": "attribute"}, {"api_name": "socket.SO_BROADCAST", "line_number": 48, "usage_type": "attribute"}, {"api_name": "xbmc.RenderCapture", "line_number": 52, "usage_type": "call"}, {"api_name": "xbmc.Monitor", "line_number": 72, "usage_type": "attribute"}, {"api_name": "xbmc.Monitor.__init__", "line_number": 74, "usage_type": "call"}, {"api_name": "xbmc.Monitor", "line_number": 74, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 91, "usage_type": "call"}, {"api_name": "xbmc.Player", "line_number": 122, "usage_type": "attribute"}, {"api_name": "xbmc.Player.__init__", "line_number": 127, 
"usage_type": "call"}, {"api_name": "xbmc.Player", "line_number": 127, "usage_type": "attribute"}, {"api_name": "xbmc.CAPTURE_FLAG_CONTINUOUS", "line_number": 197, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 201, "usage_type": "call"}, {"api_name": "xbmc.CAPTURE_STATE_DONE", "line_number": 202, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 207, "usage_type": "call"}, {"api_name": "xbmc.CAPTURE_STATE_DONE", "line_number": 208, "usage_type": "attribute"}, {"api_name": "xbmc.CAPTURE_STATE_WORKING", "line_number": 208, "usage_type": "attribute"}, {"api_name": "xbmc.CAPTURE_STATE_FAILED", "line_number": 208, "usage_type": "attribute"}, {"api_name": "xbmc.CAPTURE_FLAG_CONTINUOUS", "line_number": 209, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 211, "usage_type": "call"}, {"api_name": "time.time", "line_number": 212, "usage_type": "call"}, {"api_name": "time.time", "line_number": 216, "usage_type": "call"}, {"api_name": "time.time", "line_number": 221, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 392, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 392, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 392, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 394, "usage_type": "attribute"}, {"api_name": "socket.SO_BROADCAST", "line_number": 394, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 415, "usage_type": "call"}, {"api_name": "time.time", "line_number": 416, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 421, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 424, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 442, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 452, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 460, "usage_type": "attribute"}, {"api_name": "urllib2.Request", "line_number": 487, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 495, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 495, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 524, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 539, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 543, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 543, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 557, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", "line_number": 561, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 561, "usage_type": "name"}, {"api_name": "time.time", "line_number": 573, "usage_type": "call"}, {"api_name": "time.time", "line_number": 575, "usage_type": "call"}, {"api_name": "time.time", "line_number": 577, "usage_type": "call"}, {"api_name": "time.time", "line_number": 579, "usage_type": "call"}, {"api_name": "time.time", "line_number": 587, "usage_type": "call"}, {"api_name": "xbmc.sleep", "line_number": 593, "usage_type": "call"}, {"api_name": "xbmc.sleep", "line_number": 595, "usage_type": "call"}, {"api_name": "time.time", "line_number": 596, "usage_type": "call"}, {"api_name": "xbmc.sleep", "line_number": 597, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 614, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 614, "usage_type": "attribute"}, {"api_name": "xbmc.abortRequested", "line_number": 615, 
"usage_type": "attribute"}, {"api_name": "xbmc.sleep", "line_number": 620, "usage_type": "call"}, {"api_name": "xbmc.sleep", "line_number": 624, "usage_type": "call"}]} +{"seq_id": "190591342", "text": "#!/usr/bin/env python3\n\nfrom pymongo import MongoClient\nfrom pprint import pprint\n\n\ndef insert_one():\n try:\n column = db.column_name\n # prep/get data for payload\n userId = input('Enter Employee id :')\n userName = input('Enter Name :')\n userAge = input('Enter age :')\n userCountry = input('Enter Country :')\n\n # payload insert to users table\n payload = {\n \"id\": userId,\n \"name\": userName,\n \"age\": userAge,\n \"country\": userCountry\n }\n column.insert(payload)\n print('\\nInserted data successfully\\n')\n pprint(payload)\n\n except:\n print('Insert error')\n\n\ndef insert_many():\n pass\n\n\ndef update_one():\n one_or_many = input('\\n Select 1 for one update, or 2 for many updates\\n')\n column = db.column_name\n key = input('\\nkey term: ')\n value = input('\\nvalue term: \\n')\n try:\n if int(one_or_many) == 1:\n column.update_one({key: value})\n else:\n column.update_many({key: value})\n except:\n print('Update error')\n\n\ndef update_many():\n pass\n\n\ndef read_one():\n try:\n column = db.column_name\n key = input('\\nkey term: ')\n value = input('\\nvalue term: \\n')\n pprint(column.find_one({key: value}))\n except:\n print('Read error')\n\n\ndef read_many():\n try:\n column = db.column_name\n print(list(column.find()))\n except:\n print('Read many error')\n\n\ndef delete_one():\n pass\n\n\ndef delete_many():\n pass\n\n\ndef main():\n while True:\n selection = input('\\nSelect:\\n1 to insert_one, 11 to insert_many,\\n2 to update_one, 22 to update_many,\\n3 to read_one, 33 to read_many,\\n4 to delete_one, 44 to delete_many\\n')\n\n if selection == '1':\n insert_one()\n elif selection == '11':\n insert_many()\n elif selection == '2':\n update_one()\n elif selection == '22':\n update_many()\n elif selection == '3':\n read_one()\n elif selection == '33':\n read_many()\n elif selection == '4':\n delete_one()\n elif selection == '44':\n delete_many()\n else:\n print('\\nEXIT\\n')\n break\n\n\nif __name__ == '__main__':\n # connection to Mongo\n conn = MongoClient('localhost', 27017)\n # connection to database\n db = conn.useful_database_name\n main()\n conn.close()\n\n", "sub_path": "crud.py", "file_name": "crud.py", "file_ext": "py", "file_size_in_byte": 2493, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pprint.pprint", "line_number": 25, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 58, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "222803326", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 4 00:05:30 2018\n\n@author: Workstation\n\"\"\"\n\nimport numpy as np\nimport talib\nimport pandas as pd\nimport mf\n\n\ndf = pd.read_csv(\"C:/Users/Workstation/Desktop/82_ETF_FOREX_1_DAY.csv\").fillna(method = \"ffill\")\n\npar = dict(sma_period = 5, signal_shift = 1)\npar2 = dict(sma_period = 5, signal_shift = 2)\n\nsignal_lambda = lambda x: talib.SMA(x, par[\"sma_period\"]) > x\n\ndef PredAna(df, par, signal_lambda):\n signal_df = df.set_index(\"Date\").apply(signal_lambda)\n \n return_df = df.set_index(\"Date\").pct_change().fillna(0)\n signal_df = signal_df.shift(par[\"signal_shift\"]).fillna(False)\n \n full_result = []\n returns_dict = dict()\n for rcol in return_df:\n for scol in signal_df:\n 
signal = signal_df[scol].values\n returns = return_df[rcol].values\n filtered_returns = signal*returns\n metric_result = mf.compute_metrics(filtered_returns)\n result_name = scol + \"_\" + rcol\n metric_result[\"name\"] = result_name\n full_result.append(metric_result)\n returns_dict[result_name] = filtered_returns\n return full_result, returns_dict\n\nfull_result, returns_dict = PredAna(df, par, signal_lambda)\nfull_result2, returns_dict2 = PredAna(df, par2, signal_lambda)\n\n\n\nresult_df = pd.DataFrame(full_result).sort_values(\"R Squared\", ascending = False)\nresult_df2 = pd.DataFrame(full_result2).sort_values(\"R Squared\", ascending = False)\n\nresult_df[result_df[\"name\"] == \"LQD_DIA\"]\nresult_df2[result_df2[\"name\"] == \"LQD_DIA\"]\n\nresult_df[\"Sharpe\"].hist(bins = 30)\n\n\nref = \"FEZ\"\nmain = \"XLP\"\na1 = df[[\"Date\", ref]].set_index(\"Date\").apply(signal_lambda)\na2 = df[[\"Date\", main]].set_index(\"Date\").pct_change().shift(-1).fillna(0)\npd.DataFrame(mf.cumulative_returns((a1[ref]*a2[main]).values)).plot()\n(a1[ref]*a2[main]).reset_index().set_index(\"Date\").apply(lambda x: mf.cumulative_returns(x)).plot()\n", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 1928, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "talib.SMA", "line_number": 19, "usage_type": "call"}, {"api_name": "mf.compute_metrics", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 59, "usage_type": "call"}, {"api_name": "mf.cumulative_returns", "line_number": 59, "usage_type": "call"}, {"api_name": "mf.cumulative_returns", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "15248917", "text": "'''\nSome of useful hand-written helpers\n'''\n\nfrom yaml import load\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\n\ndef get_dataset(filename):\n '''\n Iterator for dataset's items\n\n :param filename: Path to dataset's file\n :type filename: str\n\n :return: Dataset's items\n\n :raises OSError: if has problem with file\n :raises yaml.YAMLError: if has problem with format\n :raises ValueError: if has problem with content\n '''\n with open(filename, 'rt', encoding='utf-8') as input:\n package = load(input, Loader=Loader)\n dataset = package.get('dataset')\n\n if not isinstance(dataset, list): raise ValueError('wrong format')\n yield from dataset\n\n\ndef exc_parse(exc_info):\n line = None\n fname = None\n name = None\n desc = None\n\n try:\n name = exc_info[0].__name__\n desc = exc_info[1]\n frame = exc_info[2]\n\n while frame.tb_next:\n frame = frame.tb_next\n\n line = frame.tb_lineno\n fname = frame.tb_frame.f_code.co_filename\n except:\n pass\n\n return name, desc, fname, line\n", "sub_path": "amsite/management/commands/_utils.py", "file_name": "_utils.py", "file_ext": "py", "file_size_in_byte": 1138, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "yaml.load", "line_number": 27, "usage_type": "call"}, {"api_name": "yaml.Loader", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "264855775", "text": "\"\"\"\n This module is used to manage healthcheck on different kinds of nodes.\n\"\"\"\n\nimport 
logging\n\nfrom util.executor import Job\nfrom db.pdc_db import PdcInfo\n\nlogger = logging.getLogger(__name__)\n\n\nclass Dashboard(Job):\n \"\"\" This class is used to specify the given feature to Executor,\n and organize the results into database.\n \"\"\"\n def __init__(self, node, node_name, frequency, proxy):\n self.node_name = node_name\n super(Dashboard, self).__init__(node, node_name, proxy, frequency)\n\n def post(self, collections):\n \"\"\" This function is used to store formatted results into database.\n \"\"\"\n for collection in collections:\n pdcinfo = PdcInfo(self.node_name)\n record = collections[collection]\n if collection == 'alarmsDetail':\n pdcinfo.update(collection, record)\n else:\n pdcinfo.insert(collection, record)\n\n def append(self, collections, record):\n \"\"\" This function is used to do some special operaton on epg alarm.\n Other data is just simply stored into collections.\n \"\"\"\n for key in record:\n if key == 'epgAlarms':\n collections.setdefault(key, {})\n for alarm in record[key]:\n alarms = collections[key]\n alarms[alarm] = alarms.setdefault(alarm, 0) + record[key][alarm]\n else:\n collections.update({key: record[key]})\n", "sub_path": "epc_oam_apps_160930/target/worker/networkMonitor/dashboard.py", "file_name": "dashboard.py", "file_ext": "py", "file_size_in_byte": 1484, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "util.executor.Job", "line_number": 13, "usage_type": "name"}, {"api_name": "db.pdc_db.PdcInfo", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "328388382", "text": "from django.urls import path\n\nfrom ..views.viewsets import (\n SnippetViewSet,\n UserViewSet,\n)\n\nsnippet_list = SnippetViewSet.as_view({\n 'get': 'list',\n 'post': 'create',\n})\nsnippet_detail = SnippetViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy',\n})\n\nuser_list = UserViewSet.as_view({\n 'get': 'list',\n})\nuser_detail = UserViewSet.as_view({\n 'get': 'retrieve',\n})\n\nurlpatterns = [\n path('snippets/',\n snippet_list,\n name='snippet-list'),\n path('snippets//',\n snippet_detail,\n name='snippet-detail'),\n path('users/',\n user_list,\n name='user-list'),\n path('users//',\n user_detail,\n name='user-detail'),\n]", "sub_path": "app/snippets/urls/viewsets.py", "file_name": "viewsets.py", "file_ext": "py", "file_size_in_byte": 779, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "views.viewsets.SnippetViewSet.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "views.viewsets.SnippetViewSet", "line_number": 8, "usage_type": "name"}, {"api_name": "views.viewsets.SnippetViewSet.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "views.viewsets.SnippetViewSet", "line_number": 12, "usage_type": "name"}, {"api_name": "views.viewsets.UserViewSet.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "views.viewsets.UserViewSet", "line_number": 19, "usage_type": "name"}, {"api_name": "views.viewsets.UserViewSet.as_view", "line_number": 22, "usage_type": "call"}, {"api_name": "views.viewsets.UserViewSet", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 
33, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "435235045", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 13 11:54:42 2021\n\n@author: pi\n\"\"\"\nimport random\nimport numpy as np\nimport sympy as sp\nfrom sympy import solveset, symbols, Interval\n\ndef get_m(function, lower_bound, upper_bound):\n #Reference https://stackoverflow.com/questions/39192643/finding-the-minimum-of-a-function-on-a-closed-interval-with-python\n # Lo cambié por Max, había generado un código anterior a este, pero este cubre casos en que M pudiera no estar en los limites del rango\n zeros = solveset(function, x, domain=Interval(lower_bound, upper_bound))\n assert zeros.is_FiniteSet # If there are infinite solutions the next line will hang.\n ans = [np.abs(np.float(function.evalf(subs= {x:lower_bound}))), np.abs(np.float(function.evalf(subs= {x:upper_bound}))), *[function.evalf(subs= {x:i}) for i in zeros]]\n ans= np.max(ans)\n return(ans)\n\n\ndef serie_potencias_calcular_estimar(fn, a, n, f0):\n\n #funcion, variable, suma\n f,x,g=sp.symbols('f,x,g')\n \n f=fn\n \n L=sp.plot(sp.integrate(f),(x,-1,1),show=False,legend=True)\n L.label='$f$'\n \n error=10000\n\n while abs(error) > (10 ** (-4)):\n g=(sp.diff(f,x,0).subs(x,a)/sp.factorial(0))*((x-a)**0)\n conTerminos=0\n i=1\n #sumatoria en for dependiendo del número de terminos\n #Se itera en base al número de términos que sean VALIDOS\n while(conTerminos <= n-1):\n termino_nuevo=sp.diff(f,x,i).subs(x,a)/sp.factorial(i)*((x-a)**i)\n termino_nuevo_ev=termino_nuevo.evalf(subs= {x:f0})\n if(termino_nuevo_ev != 0):\n conTerminos+=1\n g+=termino_nuevo \n i+=1\n \n g_next_diff = sp.diff(f,x,n+1) \n \n M=sp.plot(sp.integrate(g),(x,-1,1),show=False,legend=True)\n M[0].label='n={}'.format(n)\n ##color random\n r=lambda: random.randint(0,255)\n color='#%02X%02X%02X'%(r(),r(),r())\n M[0].line_color=color\n L.append(M[0])\n \n g=sp.integrate(g)\n #obtenemos M\n m = get_m(g_next_diff,f0,a)\n #print(m)\n #Calculamos el error absoluto\n error= np.float(((np.float(m))*((np.abs(f0-a))**(n+1)))/np.math.factorial(n+1))\n \n #print(g)\n val_estimado = g.evalf(subs= {x:1.0})\n print(f'El valor estimado da igual por la serie para x=pi/60 es igual = {val_estimado:.8}')\n print(f'The absolute error for function {f}, Maclaurin Serie around a={a}, evaluating x={f0:.5}, with {n} terms is {np.float(error):.8}')\n print('\\n')\n n+=1 \n L.show() \n\n#Ejercicio 8\nx = symbols('x')\nserie_potencias_calcular_estimar(x*sp.cos(x**3),0.4,10,1.0)", "sub_path": "hmw3_adrian_estimar.py", "file_name": "hmw3_adrian_estimar.py", "file_ext": "py", "file_size_in_byte": 2716, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sympy.solveset", "line_number": 16, "usage_type": "call"}, {"api_name": "sympy.Interval", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 19, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 26, "usage_type": "call"}, {"api_name": "sympy.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "sympy.integrate", "line_number": 30, "usage_type": "call"}, {"api_name": "sympy.diff", "line_number": 36, "usage_type": "call"}, {"api_name": "sympy.factorial", "line_number": 36, "usage_type": "call"}, {"api_name": "sympy.diff", 
"line_number": 42, "usage_type": "call"}, {"api_name": "sympy.factorial", "line_number": 42, "usage_type": "call"}, {"api_name": "sympy.diff", "line_number": 49, "usage_type": "call"}, {"api_name": "sympy.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "sympy.integrate", "line_number": 51, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 54, "usage_type": "call"}, {"api_name": "sympy.integrate", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.math.factorial", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.math", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 69, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 75, "usage_type": "call"}, {"api_name": "sympy.cos", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "174322475", "text": "#!/usr/bin/python3\n\nimport pdg.pdgTable as pdgTable\nimport pdg.pdgListing as pdgListing\ntable = pdgTable.PDGTable()\npdg_listing = pdgListing.PDGListing()\n\nimport click\n@click.group()\ndef cli():\n pass\n\n@cli.command()\n@click.argument(\"name\", type=str)\ndef info(name):\n \"\"\" Get the infomation of a specific particle.\"\"\"\n infomation = table.get_infomation(name)\n click.echo(infomation)\n\n@cli.command()\n@click.argument(\"name\", type=str)\ndef listing(name):\n \"\"\" Get the review of a specific particle.\"\"\"\n pdg_listing.lookup(name)\n\ndef main():\n cli()\n\n__version__ = \"1.1.0\"\n", "sub_path": "pdg/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 588, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pdg.pdgTable.PDGTable", "line_number": 5, "usage_type": "call"}, {"api_name": "pdg.pdgTable", "line_number": 5, "usage_type": "name"}, {"api_name": "pdg.pdgListing.PDGListing", "line_number": 6, "usage_type": "call"}, {"api_name": "pdg.pdgListing", "line_number": 6, "usage_type": "name"}, {"api_name": "click.group", "line_number": 9, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 18, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 14, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "271561933", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom multiprocessing import Process, Pipe\nimport argparse\nimport logging\nlog = logging.getLogger(__name__)\nimport os\nimport sys\nimport re\nimport json\nimport subprocess\nimport shutil\nfrom time import time, sleep\nfrom argparse import ArgumentParser\nfrom watchdog.observers import Observer\nfrom watchdog.events import (\n FileSystemEventHandler,\n FileModifiedEvent,\n FileDeletedEvent,\n FileCreatedEvent\n)\n\n\nCWD = os.getcwd()\nCONFIG_FILE = os.path.join(CWD, 'autosync.json')\nWORK_DIR = os.path.join(CWD, '.autosync')\nTIMEOUT = 1 # seconds to wait before syncing (each change resets the timer)\n\n# TODO: fix the fact that logging must be set up here instead of at the bottom so it can be used by subprocesses\nparser = argparse.ArgumentParser()\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-v', '--verbose', help=\"increase verbosity (show debugging messages)\",\n action='store_const', const=logging.DEBUG, 
dest='loglevel')\ngroup.add_argument('-q', '--quiet', help=\"decrease verbosity (only show warnings)\",\n action='store_const', const=logging.WARNING, dest='loglevel')\nargs = parser.parse_args()\n\nlogging.basicConfig(\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n datefmt=\"%H:%M:%S\",\n level=args.loglevel or logging.INFO\n)\n\ndef fslash(path):\n ret = path.replace('\\\\', '/')\n while '//' in ret:\n ret = ret.replace('//', '/')\n return ret\n\nclass Job(FileSystemEventHandler):\n changed_dirs = []\n\n def __repr__(self):\n return \"\".format(self.name)\n\n def __init__(self, observer, name, local, remote, ignore=[]):\n log.debug(\"Initializing job: %s\", name)\n super(Job, self).__init__()\n self.name = name\n self.sync_count = 0\n\n if os.path.isabs(local):\n log.critical(\"%s Local path must be relative to %s\", self, CWD)\n sys.exit(1)\n\n self.local_rel = os.path.relpath(local, CWD)\n self.local_abs = os.path.join(CWD, local)\n if not os.path.exists(self.local_abs):\n log.critical(\"%s Local path does not exist: %s\", self, self.local_abs)\n sys.exit(1)\n\n self.remote = remote\n\n log.debug(\"%s Local rel: %s; Local abs: %s; Remote: %s\", self, self.local_rel, self.local_abs, self.remote)\n\n self.work_dir = os.path.join(WORK_DIR, name)\n if os.path.exists(self.work_dir):\n log.debug(\"%s Deleting old work directory: %s\", self, self.work_dir)\n shutil.rmtree(self.work_dir)\n\n attempts = 0\n while True:\n if attempts >= 10:\n raise Exception(\"Couldn't create directory: {0}\".format(self.work_dir))\n log.debug(\"%s Creating work directory: %s (%s failed attempts)\", self, self.work_dir, attempts)\n try:\n os.makedirs(self.work_dir)\n break\n except Exception as e:\n attempts += 1\n log.warning(e)\n\n # build a regular expression for the ignore list\n ignore_regexes = []\n ignore_list = global_ignore + ignore\n for i, path in enumerate(ignore_list):\n path = fslash(path) # ensure all slashes are forward, for easy matching\n path = path.replace('.', '\\\\.') # match literal dots in the regex\n path = path.replace('*', '.*') # turen glob wildcards into regex ones\n ignore_regexes.append('^' + path + '$') # the file/dir itself\n ignore_regexes.append('^' + path + '/.*') # any children, if it's a dir\n log.debug(\"%s Regexified ignore list: %s\", self, ignore_regexes)\n self.ignore_re = re.compile('|'.join(ignore_regexes), re.IGNORECASE)\n\n # create an exclude file for rsync\n self.exclude_file = os.path.join(self.work_dir, 'exclude.txt')\n with open(self.exclude_file, 'wb') as f:\n for path in ignore_list:\n f.write('{0}{1}'.format(path, os.linesep).encode('utf-8'))\n\n log.debug(\"%s Adding handler to observer\", self)\n observer.schedule(self, self.local_abs, recursive=True)\n\n log.info(\"%s %s --> %s (ignoring %s)\",\n self, self.local_abs, remote, 'only globals' if not ignore else ', '.join(ignore))\n\n self.parent_pipe, child_pipe = Pipe()\n self.child_process = Process(target=self.wait_for_change_signals, args=(child_pipe,))\n self.child_process.start()\n\n def on_any_event(self, event):\n \"\"\"\n When a file changes, if it's not in the ignore list, alert the child process.\n \"\"\"\n rel_path = fslash(os.path.relpath(event.src_path, self.local_abs))\n if self.ignore_re.match(rel_path):\n log.debug(\"%s Ignored change in %s\", self, rel_path)\n return\n log.debug(\"%s Logged change in %s\", self, rel_path)\n self.parent_pipe.send(rel_path)\n\n def sync(self):\n \"\"\"\n Actually call rsync.\n \"\"\"\n log.info(\"%s Starting%s sync\", self, ' initial' if 
self.sync_count == 0 else '')\n args = [\n 'rsync',\n #'-a', # archive mode; equals -rlptgoD (no -H,-A,-X)\n '-r', # recurse into directories\n '-z', # compress file data during the transfer\n '-q', # suppress non-error messages\n '--delete', # delete extraneous files from destination dirs\n '--exclude-from={0}'.format(os.path.relpath(self.exclude_file, CWD)),\n fslash(self.local_rel).rstrip('/') + '/',\n fslash(self.remote).rstrip('/') + '/',\n '--chmod=ugo=rwX' # use remote default permissions for new files\n ]\n log.debug(\"%s Running command: %s\", self, ' '.join(args))\n start_time = time()\n subprocess.call(args)\n log.info(\"%s Sync done in %d seconds\", self, time() - start_time)\n self.sync_count += 1\n\n def wait_for_sync_signals(self, pipe):\n log.debug(\"%s Waiting for sync signals\", self)\n try:\n while True:\n if pipe.poll(None):\n pipe.recv()\n self.sync()\n except KeyboardInterrupt:\n log.debug(\"%s Stopped watching for sync signals\", self)\n\n def wait_for_change_signals(self, pipe):\n \"\"\"\n Wait for changes to be made by the parent process and start a sync when the timeout expires.\n \"\"\"\n parent_sync_pipe, child_sync_pipe = Pipe()\n sync_process = Process(target=self.wait_for_sync_signals, args=(child_sync_pipe,))\n sync_process.start()\n log.debug(\"%s Waiting for change signals\", self)\n last_change = 1 # force initial sync\n try:\n while True:\n now = time()\n # check if a change was posted\n if pipe.poll(0.1):\n pipe.recv()\n last_change = now\n # check if the change timeout has expired, and sync if so\n if last_change and now - last_change > TIMEOUT:\n last_change = 0\n parent_sync_pipe.send(1)\n except KeyboardInterrupt:\n log.debug(\"%s Stopped watching for change signals\", self)\n\ndef load_config():\n \"\"\"Look for a config file in the CWD and parse it.\"\"\"\n global global_ignore\n\n if os.path.exists(CONFIG_FILE):\n log.info(\"Loading config from %s\", CONFIG_FILE)\n with open(CONFIG_FILE, 'r') as config_file:\n config = json.load(config_file)\n log.debug(\"Config dict: %s\", config)\n\n # get the global ignore list\n global_ignore = config.get('ignore', [])\n log.info(\"Global ignore list: %s\", global_ignore)\n return config\n else:\n log.warning(\"No config file found in %s\", CWD)\n sys.exit(\"Create %s first\" % CONFIG_FILE)\n\ndef create_jobs(config):\n \"\"\"Instantiates an event handler for each job defined in the config.\"\"\"\n jobs = {}\n jobs_ = config.get('jobs', {})\n job_count = len(jobs_)\n if job_count:\n log.debug(\"Starting %s jobs (%s)\", job_count, ', '.join(jobs_.keys()))\n for name, data in jobs_.items():\n job = Job(observer, name, **data)\n jobs[name] = job\n del jobs_, job_count\n try:\n observer.start()\n except FileNotFoundError as e:\n log.error(e.args[1])\n sys.exit(1)\n return jobs\n else:\n sys.exit(\"No jobs defined in config file\")\n\nif __name__ == '__main__':\n if not os.path.exists(WORK_DIR):\n os.makedirs(WORK_DIR)\n\n observer = Observer()\n config = load_config()\n jobs = create_jobs(config)\n try:\n while True:\n sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n log.info(\"Normal shutdown\")\n observer.join()\n", "sub_path": "autosync.py", "file_name": "autosync.py", "file_ext": "py", "file_size_in_byte": 8868, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", 
"line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 35, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 37, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 43, "usage_type": "attribute"}, {"api_name": "watchdog.events.FileSystemEventHandler", "line_number": 52, "usage_type": "name"}, {"api_name": "os.path.isabs", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 81, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 89, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 105, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 111, "usage_type": "attribute"}, {"api_name": "multiprocessing.Pipe", "line_number": 119, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 152, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 153, "usage_type": "call"}, {"api_name": "time.time", "line_number": 154, "usage_type": "call"}, {"api_name": "multiprocessing.Pipe", "line_number": 171, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 172, "usage_type": "call"}, {"api_name": "time.time", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 197, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 206, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 223, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 226, 
"usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 230, "usage_type": "call"}, {"api_name": "watchdog.observers.Observer", "line_number": 232, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 237, "usage_type": "call"}]} +{"seq_id": "193187719", "text": "from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver import ActionChains\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.webdriver.support.ui import Select\r\nimport pdfkit\r\nimport time\r\nimport datetime\r\nfrom datetime import timedelta\r\npath_wkthmltopdf = r'C:\\Program Files (x86)\\wkhtmltopdf\\bin\\wkhtmltopdf.exe'\r\nconfig = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)\r\n#### number of days in advance to schedule\r\ndaysToSchedual = 28\r\n\r\ndef getMonth(dateTime):\r\n month = eval(dateTime[:dateTime.find('/')])\r\n return str(month)\r\n\r\ndef getDay(dateTime):\r\n day = eval(dateTime[dateTime.find('/')+1:dateTime.rfind('/')])\r\n return str(day)\r\n\r\ndef getYear(dateTime):\r\n year = eval(dateTime[dateTime.rfind('/')+1:dateTime.rfind('/')+5])\r\n print(year)\r\n print(dateTime.find(' '))\r\n return str(year)\r\n\r\ndef getT(dateTime, startbool):\r\n print(dateTime)\r\n hours = eval(dateTime[dateTime.find(' ')+1:dateTime.find(':')])\r\n mins = float(dateTime[dateTime.find(':')+1:dateTime.rfind(' ')])\r\n if 'P' in dateTime :\r\n if hours != 12 :\r\n hours += 12\r\n elif hours == 12 :\r\n hours = 0\r\n mins = round(mins/30.0)\r\n hours = hours * 60 + mins * 30\r\n if hours > 1410 :\r\n hours = 1410\r\n return str(hours)\r\n\r\ndef openTripTracker():\r\n #Login Trip Tracker\r\n driver = webdriver.Chrome()\r\n actionChains = ActionChains(driver)\r\n driver.get(\"https://vtrans-web.hopkinsschools.org/Triptracker/Login.aspx?ReturnUrl=%2fTriptracker%2fTripRequest.aspx%3fRecordID%3d3251&RecordID=3251\")\r\n element = driver.find_element_by_name(\"TxtBxUName\")\r\n element.send_keys(\"[user name]\") #add user name\r\n element = driver.find_element_by_name(\"TxtBxPWord\")\r\n element.send_keys(\"[password]\") #add password\r\n driver.find_element_by_id(\"BtnLogin\").click()\r\n return driver\r\n\r\ndef iterateCharters(driver, webmail):\r\n getCharterinfo(driver, webmail)\r\n\r\n\r\ndef makePdf(driver, charter):\r\n print(charter)\r\n #find out what this window is called so you can get back to it\r\n main_window_handle = None\r\n while not main_window_handle:\r\n main_window_handle = driver.current_window_handle\r\n\r\n #open new popup that has info\r\n driver.find_element_by_id(\"ctl00_contentPage_ucTripRequestView_PrintPage\").click()\r\n\r\n #find new window\r\n signin_window_handle = None\r\n while not signin_window_handle:\r\n for handle in driver.window_handles:\r\n if handle != main_window_handle:\r\n signin_window_handle = handle\r\n break\r\n driver.switch_to.window(signin_window_handle) #switch to new window\r\n\r\n #start generating html code for pdf\r\n pdfhtml = '

'\r\n startTime = charter[2]\r\n startDate = datetime.date(eval(getYear(startTime)), eval(getMonth(startTime)), eval(getDay(startTime)))\r\n startDay = startDate.weekday()\r\n if startDay == 0 :\r\n pdfhtml += \"Monday \"\r\n elif startDay == 1:\r\n pdfhtml += \"Tuesday \"\r\n elif startDay == 2:\r\n pdfhtml += \"Wednesday \"\r\n elif startDay == 3:\r\n pdfhtml += \"Thursday \"\r\n elif startDay == 4:\r\n pdfhtml += \"Friday \"\r\n elif startDay == 5:\r\n pdfhtml += \"Saturday \"\r\n else:\r\n pdfhtml += \"Sunday \"\r\n\r\n if charter[5] :\r\n pdfhtml += \"Trailer \"\r\n\r\n if charter[6] > 0:\r\n pdfhtml += \"WC x \" + str(charter[6])\r\n \r\n pdfhtml += '

'\r\n pdfhtml += 'updated ' + str(datetime.datetime.now())\r\n pdfhtml += driver.find_element_by_xpath('/html/body').get_attribute(\"innerHTML\")\r\n pdfhtml += ''\r\n pdfhtml += ''\r\n pdfhtml += ''\r\n pdfhtml += ''\r\n pdfhtml += ' \t
<table border=\"1\" cellpadding=\"4\" width=\"100%\">
<tr><th>NAME</th><th>Time</th><th>Mileage</th></tr>
<tr><td>SIGN ON</td><td></td><td>XXXXXXX</td></tr>
<tr><td>LEAVE BASE</td><td></td><td></td></tr>
<tr><td>ARRIVE PICKUP</td><td></td><td></td></tr>
<tr><td>DEPART PICKUP</td><td></td><td>XXXXXXX</td></tr>
<tr><td>ARRIVE DROP OFF</td><td></td><td></td></tr>
<tr><td>DEPART DROP OFF</td><td></td><td></td></tr>
<tr><td>ARRIVE ORIGIN</td><td></td><td></td></tr>
<tr><td>DEPART ORIGIN</td><td></td><td>XXXXXXX</td></tr>
<tr><td>ARRIVE BASE</td><td></td><td></td></tr>
<tr><td>SIGN OUT</td><td></td><td>XXXXXXX</td></tr>
<tr><td>TOTAL</td><td></td><td></td></tr>
</table>
'\r\n\r\n driver.close()\r\n driver.switch_to.window(main_window_handle)\r\n\r\n driver.find_element_by_xpath('//*[@id=\"ctl00_contentPage_btnTripDirections\"]').click()\r\n driver.implicitly_wait(10)\r\n time.sleep(5)\r\n driver.find_element_by_link_text(\"Open Trip Directions/Mapping Tool\").click()\r\n driver.implicitly_wait(10)\r\n time.sleep(4)\r\n driver.implicitly_wait(10)\r\n driver.find_element_by_xpath('//*[@id=\"CalcDirectionsButton\"]').click()\r\n driver.implicitly_wait(10)\r\n time.sleep(2)\r\n element = driver.find_element_by_xpath('//*[@id=\"SetDirectionButton\"]').click()\r\n driver.implicitly_wait(10)\r\n time.sleep(2)\r\n \r\n try:\r\n WebDriverWait(driver, 3).until(EC.alert_is_present(),\r\n 'Timed out waiting for PA creation ' +\r\n 'confirmation popup to appear.')\r\n\r\n alert = driver.switch_to_alert()\r\n alert.accept()\r\n print(\"alert accepted\")\r\n except TimeoutException:\r\n print(\"no alert\")\r\n\r\n driver.find_element_by_xpath('//*[@id=\"ctl00_contentPage_TripMapToolBing_HeaderPanel\"]/table/tbody/tr/td[2]/a[2]').click()\r\n\r\n driver.find_element_by_link_text(\"Scheduling\").click()\r\n driver.implicitly_wait(10)\r\n time.sleep(8)\r\n driver.find_element_by_id(\"ctl00_contentPage_ucTripScheduleNew_grdMasterGrid_cell0_6_ddlVehicleDriverList_B-1\").click()\r\n driver.implicitly_wait(10)\r\n time.sleep(10)\r\n driver.find_element_by_id(\"ctl00_contentPage_ucTripScheduleNew_grdMasterGrid_cell0_6_ddlVehicleDriverList_DDD_L_LBI1T0\").click()\r\n driver.implicitly_wait(10)\r\n time.sleep(6)\r\n driver.find_element_by_id(\"ctl00_contentPage_ucTripScheduleNew_grdMasterGrid_cell0_7_ddlVehicleList_B-1\").click()\r\n driver.implicitly_wait(10)\r\n time.sleep(10)\r\n driver.find_element_by_xpath('//*[@id=\"ctl00_contentPage_ucTripScheduleNew_grdMasterGrid_cell0_7_ddlVehicleList_DDD_L_LBT\"]/tbody/tr[3]').click()\r\n driver.implicitly_wait(10)\r\n time.sleep(6)\r\n driver.find_element_by_id(\"ctl00_contentPage_ucTripScheduleNew_btnSaveSchedule\").click()\r\n driver.implicitly_wait(10)\r\n time.sleep(6)\r\n driver.find_element_by_id(\"ctl00_contentPage_ucTripScheduleNew_btnDTSReportWithMap\").click()\r\n driver.implicitly_wait(10)\r\n time.sleep(10)\r\n signin_window_handle = None\r\n while not signin_window_handle:\r\n for handle in driver.window_handles:\r\n if handle != main_window_handle:\r\n signin_window_handle = handle\r\n break\r\n driver.switch_to.window(signin_window_handle)\r\n driver.implicitly_wait(10)\r\n time.sleep(2)\r\n driver.find_element_by_id(\"ctl00_contentPage_PreviewButton\").click()\r\n driver.implicitly_wait(10)\r\n time.sleep(2)\r\n\r\n driver.switch_to.frame(driver.find_element_by_xpath('//*[@id=\"ctl00_contentPage_TylerReportViewer_ContentFrame\"]'))\r\n pdfhtml += '

'\r\n pdfhtml += driver.find_element_by_xpath('/html/body').get_attribute(\"innerHTML\")\r\n driver.switch_to.default_content()\r\n \r\n print(driver.find_element_by_id(\"ctl00_contentPage_ReportToolbar1_Menu_ITCNT7_PageCount_I\").get_attribute('value'))\r\n page_count = eval(driver.find_element_by_id(\"ctl00_contentPage_ReportToolbar1_Menu_ITCNT7_PageCount_I\").get_attribute('value'))\r\n for i in range(page_count - 1):\r\n print(i)\r\n driver.find_element_by_id(\"ctl00_contentPage_ReportToolbar1_Menu_DXI8_T\").click()\r\n driver.implicitly_wait(10)\r\n time.sleep(2)\r\n driver.switch_to.frame(driver.find_element_by_xpath('//*[@id=\"ctl00_contentPage_TylerReportViewer_ContentFrame\"]'))\r\n pdfhtml += '

'\r\n pdfhtml += driver.find_element_by_xpath('/html/body').get_attribute(\"innerHTML\")\r\n driver.switch_to.default_content()\r\n\r\n print(\"####html>>out2.pdf\")\r\n\r\n index = pdfhtml.find('>>> one way end time\", endTime)\r\n numBuses = eval(driver.find_element_by_xpath('//*[@id=\"ctl00_contentPage_ucTripRequestView_lblNoVehicles\"]').text)\r\n trailerBool = \"trailer\" in str(driver.find_element_by_xpath('//*[@id=\"contentstart\"]').get_attribute(\"innerHTML\")).lower()\r\n wc = eval(driver.find_element_by_xpath('//*[@id=\"ctl00_contentPage_ucTripRequestView_lblNoWheelchairs\"]').text)\r\n print(charterId, \":\", charterName, \"@\", startTime, \"-\", endTime, \"#\", numBuses, \"^T\", trailerBool, \"%WC\", wc)\r\n\r\n startDate = datetime.date(eval(getYear(startTime)), eval(getMonth(startTime)), eval(getDay(startTime)))\r\n startDay = startDate.weekday()\r\n today = datetime.date.today()\r\n weekout = today + timedelta(days=daysToSchedual)\r\n\r\n lessThanWeekAway = startDate < weekout\r\n\r\n if True :\r\n charter = [charterId, charterName, startTime, endTime, numBuses, trailerBool, wc, numBuses, startDay]\r\n makePdf(driver, charter)\r\n #webmail = openWebMail()\r\n while charter[4] > 0 :\r\n addCharterToCalandar(webmail, charter)\r\n charter[4] -= 1\r\n #strng = input(\"enter random value to continue\")\r\n #webmail.close()\r\n else :\r\n print(\"I can't see the future\")\r\n \r\n return lessThanWeekAway\r\n\r\ndef openWebMail():\r\n #https://webmail.mtibus.com/owa/auth/logon.aspx?replaceCurrent=1&url=https%3a%2f%2fwebmail.mtibus.com%2fowa%2f\r\n edriver = webdriver.Chrome()\r\n edriver.get(\"https://webmail.mtibus.com/owa/auth/logon.aspx?replaceCurrent=1&url=https%3a%2f%2fwebmail.mtibus.com%2fowa%2f\")\r\n edriver.find_element_by_id(\"chkBsc\").click()\r\n #assert \"Python\" in driver.title\r\n element = edriver.find_element_by_name(\"username\")\r\n element.send_keys(\"[user name]\") #add user name\r\n element = edriver.find_element_by_name(\"password\")\r\n element.send_keys(\"[password]\") #add password\r\n element.send_keys(Keys.RETURN)\r\n return edriver\r\n\r\ndef addCharterToCalandar(edriver, charter):\r\n edriver.find_element_by_id(\"lnkNavCal\").click()\r\n edriver.get(\"https://webmail.mtibus.com/owa/?ae=Folder&t=IPF.Appointment&id=LgAAAABy45u0jvv4T5%2fbXH9lEKAGAQBjaF2crhh5T4vQqjyTvs%2b7ACtQ8AANAAAC\")\r\n edriver.find_element_by_id(\"lnkHdrnewappt\").click()\r\n charterId = charter[0]\r\n charterName = charter[1]\r\n startTime = charter[2]\r\n endTime = charter[3]\r\n numBuses = charter[4]\r\n trailerBool = charter[5]\r\n wc = charter[6]\r\n multiBus = charter[7]\r\n subject = charterId + \" - \" + charterName \r\n if multiBus:\r\n subject += \" - BUS \" + str(numBuses)\r\n if trailerBool :\r\n subject += \" **TRAILER**\"\r\n if wc >= 1 :\r\n subject += \" **WC \" + str(wc) + \"**\"\r\n subject += \" Updated\"\r\n\r\n edriver.implicitly_wait(10)\r\n edriver.find_element_by_id(\"txtsbj\").send_keys(subject)\r\n\r\n #attach charter PDF\r\n edriver.find_element_by_xpath('//*[@id=\"frm\"]/table/tbody/tr[2]/td[3]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/table/tbody/tr[18]/td[2]/a').click()\r\n edriver.implicitly_wait(10)\r\n elem = edriver.find_element_by_xpath('//*[@id=\"attach\"]');\r\n edriver.implicitly_wait(10)\r\n filePath = \"C:\\\\Program Files (x86)\\\\Python36-32\\\\\" + charterId + \".pdf\"\r\n elem.send_keys(filePath);\r\n edriver.implicitly_wait(10)\r\n edriver.find_element_by_xpath('//*[@id=\"attachbtn\"]').click()\r\n 
edriver.find_element_by_xpath('//*[@id=\"lnkHdrdone\"]').click()\r\n\r\n #enter charter date and time info.\r\n startValue = getT(startTime, True)\r\n endValue = getT(endTime, True)\r\n if (eval(startValue) - eval(endValue)) >= 0:\r\n if (eval(startValue) - eval(endValue)) < 30:\r\n if endValue == \"1410\":\r\n startValue = \"1380\"\r\n else :\r\n endValue = str(eval(endValue) + 30)\r\n \r\n select = Select(edriver.find_element_by_xpath('//*[@id=\"frm\"]/table/tbody/tr[2]/td[3]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/table/tbody/tr[8]/td[5]/table/tbody/tr/td[1]/select'))\r\n select.select_by_value(getMonth(startTime))\r\n\r\n select = Select(edriver.find_element_by_xpath('//*[@id=\"frm\"]/table/tbody/tr[2]/td[3]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/table/tbody/tr[8]/td[5]/table/tbody/tr/td[2]/select'))\r\n select.select_by_value(getDay(startTime))\r\n\r\n select = Select(edriver.find_element_by_xpath('//*[@id=\"frm\"]/table/tbody/tr[2]/td[3]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/table/tbody/tr[8]/td[5]/table/tbody/tr/td[3]/select'))\r\n select.select_by_value(getYear(startTime))\r\n\r\n select = Select(edriver.find_element_by_xpath('//*[@id=\"frm\"]/table/tbody/tr[2]/td[3]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/table/tbody/tr[8]/td[5]/table/tbody/tr/td[4]/select'))\r\n select.select_by_value(startValue)\r\n\r\n select = Select(edriver.find_element_by_xpath('//*[@id=\"frm\"]/table/tbody/tr[2]/td[3]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/table/tbody/tr[10]/td[5]/table/tbody/tr/td[1]/select'))\r\n select.select_by_value(getMonth(endTime))\r\n\r\n select = Select(edriver.find_element_by_xpath('//*[@id=\"frm\"]/table/tbody/tr[2]/td[3]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/table/tbody/tr[10]/td[5]/table/tbody/tr/td[2]/select'))\r\n select.select_by_value(getDay(endTime))\r\n\r\n select = Select(edriver.find_element_by_xpath('//*[@id=\"frm\"]/table/tbody/tr[2]/td[3]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/table/tbody/tr[10]/td[5]/table/tbody/tr/td[3]/select'))\r\n select.select_by_value(getYear(endTime))\r\n\r\n \r\n select = Select(edriver.find_element_by_xpath('//*[@id=\"frm\"]/table/tbody/tr[2]/td[3]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/table/tbody/tr[10]/td[5]/table/tbody/tr/td[4]/select'))\r\n select.select_by_value(endValue)\r\n\r\n edriver.implicitly_wait(10)\r\n edriver.find_element_by_xpath('//*[@id=\"lnkHdrsaveclose\"]').click()\r\n\r\ndef main():\r\n driver = openTripTracker()\r\n webmail = openWebMail()\r\n doAgain = True\r\n while 'y' in input(\"Do you want to update a charter?\").lower() :\r\n iterateCharters(driver, webmail)\r\n \r\n webmail.close()\r\n driver.close()\r\n\r\nmain()\r\n", "sub_path": "python/Update charter.py", "file_name": "Update charter.py", "file_ext": "py", "file_size_in_byte": 16405, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pdfkit.configuration", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 50, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 110, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 123, "usage_type": "call"}, {"api_name": 
"time.sleep", "line_number": 126, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 130, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 133, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 136, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.alert_is_present", "line_number": 136, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 136, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 143, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 150, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 153, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 156, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 159, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 162, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 165, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 168, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 177, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 180, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 193, "usage_type": "call"}, {"api_name": "pdfkit.from_string", "line_number": 207, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 223, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 231, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 233, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 233, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 234, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 254, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 254, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.RETURN", "line_number": 262, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 262, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 310, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 313, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 316, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 319, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 322, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 325, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 328, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 332, "usage_type": "call"}]} +{"seq_id": "244101079", "text": "import itertools\nimport random\nfrom colorama import init\nfrom colorama import Fore, Back, Style\nimport hw3\nimport sample_agent\nfrom copy import deepcopy\nimport time\ninit()\nCONSTRUCTOR_TIMEOUT = 60\nACTION_TIMEOUT = 5\nDIMENSIONS = (10, 10)\nPENALTY = 10000\nMAXIMAL_LENGTH = 100\n\ndef pad_the_input(a_map):\n state = {}\n new_i_dim = DIMENSIONS[0] + 2\n new_j_dim = DIMENSIONS[1] + 2\n for i in range(0, new_i_dim):\n for j in range(0, new_j_dim):\n if i == 0 or j == 0 or i == new_i_dim - 1 or j == new_j_dim - 1:\n state[(i, j)] = 'U'\n elif 'S' 
in a_map[i - 1][j - 1]:\n state[(i, j)] = 'S1'\n else:\n state[(i, j)] = a_map[i - 1][j - 1]\n return state\n\n\nclass Game:\n def __init__(self, a_map):\n self.ids = [hw3.ids, sample_agent.ids]\n self.initial_state = pad_the_input(a_map)\n self.state = deepcopy(self.initial_state)\n self.control_zone_1 = None\n self.control_zone_2 = None\n self.divide_map()\n self.score = [0, 0]\n self.agents = []\n\n def state_to_agent(self):\n state_as_list = []\n for i in range(DIMENSIONS[0]):\n state_as_list.append([]*DIMENSIONS[1])\n for j in range(DIMENSIONS[1]):\n state_as_list[i].append(self.state[(i + 1, j + 1)][0])\n return state_as_list\n\n def initiate_agent(self, module, control_zone, first):\n start = time.time()\n control_zone_to_agent = [(i - 1, j - 1) for (i, j) in control_zone]\n agent = module.Agent(self.state_to_agent(), control_zone_to_agent, first)\n if time.time() - start > CONSTRUCTOR_TIMEOUT:\n self.handle_constructor_timeout(module.ids)\n return agent\n\n def divide_map(self):\n habitable_tiles = [(i, j) for i, j in\n itertools.product(range(1, DIMENSIONS[0] + 1),\n range(1, DIMENSIONS[1] + 1)) if 'U' not in self.state[(i, j)]]\n random.shuffle(habitable_tiles)\n\n half = len(habitable_tiles) // 2\n self.control_zone_1 = set(habitable_tiles[:half])\n self.control_zone_2 = set(habitable_tiles[half:])\n assert len(self.control_zone_1) == len(self.control_zone_2)\n\n def get_action(self, agent):\n action = agent.act(self.state_to_agent())\n return action\n\n def check_if_action_legal(self, action, zone_of_control):\n try:\n if len(action) == 0:\n return True\n except TypeError:\n return False\n if len(action) > 3:\n return False\n count = {'vaccinate': 0, 'quarantine': 0}\n for atomic_action in action:\n effect, location = atomic_action[0], (atomic_action[1][0] + 1, atomic_action[1][1] + 1)\n try:\n status = self.state[location]\n except KeyError:\n return False\n if effect.lower() not in ['vaccinate', 'quarantine']:\n return False\n count[effect] += 1\n if count['vaccinate'] > 1 or count['quarantine'] > 2:\n return False\n if effect == 'vaccinate' and 'H' not in status:\n return False\n if effect == 'quarantine' and 'S' not in status:\n return False\n if location not in zone_of_control:\n return False\n\n return True\n\n def apply_action(self, actions):\n if not actions:\n return\n for atomic_action in actions:\n effect, location = atomic_action[0], (atomic_action[1][0] + 1, atomic_action[1][1] + 1)\n if 'v' in effect:\n self.state[location] = 'I'\n else:\n self.state[location] = 'Q0'\n\n def change_state(self):\n new_state = deepcopy(self.state)\n\n # virus spread\n for i in range(1, DIMENSIONS[0] + 1):\n for j in range(1, DIMENSIONS[1] + 1):\n if self.state[(i, j)] == 'H' and ('S' in self.state[(i - 1, j)] or\n 'S' in self.state[(i + 1, j)] or\n 'S' in self.state[(i, j - 1)] or\n 'S' in self.state[(i, j + 1)]):\n new_state[(i, j)] = 'S1'\n\n # advancing sick counters\n for i in range(1, DIMENSIONS[0] + 1):\n for j in range(1, DIMENSIONS[1] + 1):\n if 'S' in self.state[(i, j)]:\n turn = int(self.state[(i, j)][1])\n if turn < 3:\n new_state[(i, j)] = 'S' + str(turn + 1)\n else:\n new_state[(i, j)] = 'H'\n\n # advancing quarantine counters\n if 'Q' in self.state[(i, j)]:\n turn = int(self.state[(i, j)][1])\n if turn < 2:\n new_state[(i, j)] = 'Q' + str(turn + 1)\n else:\n new_state[(i, j)] = 'H'\n\n self.state = new_state\n\n def update_scores(self, player, control_zone):\n for (i, j) in control_zone:\n if 'H' in self.state[(i, j)]:\n self.score[player] += 1\n if 'I' in 
self.state[(i, j)]:\n self.score[player] += 1\n if 'S' in self.state[(i, j)]:\n self.score[player] -= 1\n if 'Q' in self.state[(i, j)]:\n self.score[player] -= 5\n\n def handle_constructor_timeout(self, agent):\n raise Exception\n\n def get_legal_action(self, number_of_agent, zoc):\n start = time.time()\n if number_of_agent == 0:\n action, board = self.get_action(self.agents[number_of_agent])\n else: \n action = self.get_action(self.agents[number_of_agent])\n board = None\n finish = time.time()\n if finish - start > ACTION_TIMEOUT:\n self.score[number_of_agent] -= PENALTY\n print(f'agent of {self.ids[number_of_agent]} timed out on action {finish - start}!')\n return 'illegal'\n if not self.check_if_action_legal(action, zoc):\n self.score[number_of_agent] -= PENALTY\n print(f'agent of {self.ids[number_of_agent]} chose illegal action {action}!')\n return 'illegal'\n return action, board\n\n def play_episode(self, swapped=False):\n counter = MAXIMAL_LENGTH\n while (('S1' in self.state.values() or 'S2' in self.state.values() or 'S3' in self.state.values())\n and (counter > 0)):\n\n counter = counter - 1\n def get_agent_zocs():\n to_agent_zoc = lambda zoc: [(i - 1, j - 1) for (i, j) in zoc]\n if not swapped:\n my_zone = to_agent_zoc(self.control_zone_1)\n ai_zone = to_agent_zoc(self.control_zone_2)\n else:\n my_zone = to_agent_zoc(self.control_zone_2)\n ai_zone = to_agent_zoc(self.control_zone_1)\n return my_zone, ai_zone\n def board_compare(board, player_comare):\n obs_state = self.state_to_agent()\n my_zone, ai_zone = get_agent_zocs()\n if player_comare == 0:\n zoc = my_zone\n else:\n zoc = ai_zone\n for (i, j) in zoc:\n assert obs_state[i][j] == board[i][j]\n\n def print_board(board = None):\n if board:\n obs_state = board\n else:\n obs_state = self.state_to_agent()\n my_zone, ai_zone = get_agent_zocs()\n for i, line in enumerate(obs_state):\n for j, stat in enumerate(line):\n if (i, j) in my_zone:\n color = Fore.GREEN\n elif (i, j) in ai_zone:\n color = Fore.RED\n else:\n color = Fore.WHITE\n print(color + stat, end =\" \")\n print()\n print()\n print(Style.RESET_ALL)\n #wait = input()\n print_board()\n \n if not swapped:\n action, board = self.get_legal_action(0, self.control_zone_1)\n if action == 'illegal':\n return\n self.apply_action(action)\n print(f'player {self.ids[0]} uses {action}!')\n board_compare(board, 0)\n board_compare(board, 1)\n action, _ = self.get_legal_action(1, self.control_zone_2)\n if action == 'illegal':\n return\n self.apply_action(action)\n print(f'player {self.ids[1]} uses {action}!')\n else:\n action, _ = self.get_legal_action(1, self.control_zone_1)\n if action == 'illegal':\n return\n self.apply_action(action)\n print(f'player {self.ids[1]} uses {action}!')\n\n action, board = self.get_legal_action(0, self.control_zone_2)\n if action == 'illegal':\n return\n self.apply_action(action)\n print(f'player {self.ids[0]} uses {action}!')\n\n self.change_state()\n if not swapped:\n self.update_scores(0, self.control_zone_1)\n self.update_scores(1, self.control_zone_2)\n else:\n self.update_scores(1, self.control_zone_1)\n self.update_scores(0, self.control_zone_2)\n print('------')\n\n def play_game(self):\n print(f'*********** starting a first round! 
************ \\n \\n')\n self.agents = [self.initiate_agent(hw3, self.control_zone_1, 'first'),\n self.initiate_agent(sample_agent, self.control_zone_2, 'second')]\n self.play_episode()\n print(f'Score for {hw3.ids} is {self.score[0]}, score for {sample_agent.ids} is {self.score[1]}')\n print(f'*********** starting a second round! ************ \\n \\n')\n self.state = deepcopy(self.initial_state)\n\n self.agents = [self.initiate_agent(hw3, self.control_zone_2, 'second'),\n self.initiate_agent(sample_agent, self.control_zone_1, 'first')]\n\n self.play_episode(swapped=True)\n print(f'end of game!')\n return self.score\n\n\ndef main():\n a_map = [\n ['H', 'S', 'S', 'H', 'H', 'H', 'U', 'S', 'H', 'H'],\n ['H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'],\n ['H', 'U', 'U', 'H', 'H', 'U', 'H', 'H', 'H', 'H'],\n ['H', 'H', 'U', 'H', 'S', 'U', 'H', 'H', 'U', 'H'],\n ['H', 'H', 'U', 'H', 'H', 'U', 'H', 'H', 'S', 'H'],\n ['S', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'],\n ['H', 'H', 'H', 'S', 'U', 'U', 'H', 'H', 'H', 'U'],\n ['H', 'U', 'H', 'H', 'U', 'H', 'H', 'H', 'U', 'H'],\n ['H', 'H', 'U', 'H', 'H', 'U', 'H', 'S', 'U', 'H'],\n ['H', 'H', 'H', 'H', 'S', 'H', 'H', 'H', 'H', 'H'],\n ]\n assert len(a_map) == DIMENSIONS[0]\n assert len(a_map[0]) == DIMENSIONS[1]\n statistic = [0]*10\n for idx, _ in enumerate(statistic):\n game = Game(a_map)\n results = game.play_game()\n print(f'Score for {hw3.ids} is {results[0]}, score for {sample_agent.ids} is {results[1]}')\n statistic[idx] = results[0] > results[1]\n print(statistic)\nif __name__ == '__main__':\n main()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 11599, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "colorama.init", "line_number": 9, "usage_type": "call"}, {"api_name": "hw3.ids", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sample_agent.ids", "line_number": 33, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 35, "usage_type": "call"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "time.time", "line_number": 54, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 60, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 62, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 113, "usage_type": "call"}, {"api_name": "time.time", "line_number": 159, "usage_type": "call"}, {"api_name": "time.time", "line_number": 165, "usage_type": "call"}, {"api_name": "colorama.Fore.GREEN", "line_number": 210, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 210, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 212, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 212, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 214, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 214, "usage_type": "name"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 218, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 218, "usage_type": "name"}, {"api_name": "hw3.ids", "line_number": 262, "usage_type": "attribute"}, {"api_name": "sample_agent.ids", "line_number": 262, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 264, "usage_type": "call"}, {"api_name": "hw3.ids", "line_number": 293, "usage_type": "attribute"}, {"api_name": "sample_agent.ids", "line_number": 
293, "usage_type": "attribute"}]} +{"seq_id": "498533602", "text": "import csv\nimport json\nimport os\nimport uuid\nfrom datetime import datetime\nfrom io import StringIO\nfrom pprint import pprint\n\nimport PIL\nimport pyinvoice\nfrom barcode.writer import ImageWriter\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.models import User\nfrom django.core.files.storage import FileSystemStorage\nfrom django.db import IntegrityError\nfrom django.http import JsonResponse, HttpResponse\n\n# Create your views here.\nfrom django.shortcuts import render\nfrom django.template.loader import get_template\nfrom django.utils.decorators import method_decorator\nfrom django.views import generic\nfrom django.views.decorators.csrf import csrf_exempt\nfrom pyinvoice.models import InvoiceInfo, ServiceProviderInfo, Transaction, ClientInfo\nfrom pyinvoice.templates import SimpleInvoice\nfrom reportlab.graphics import shapes, barcode\n\nfrom app.models import Customer, CategoryItemPhoneRepair, Token, TokenManager, Category, Phone, Item, Address, \\\n PhoneNumber, Repair, RepairState, Invoice, PaymentMethod, RepairPayment, Photos, Promo, Quotation, QuotationDetail, \\\n FrenchAddress, Labels\nimport labels\nfrom reportlab.lib import colors\nimport barcode\n\nfrom app.venv import GenericFunctions\nfrom app.venv.GenericFunctions import newClient, newPhone, newAddress, delPhone, delClient, getClientAddress, \\\n newQuotation, addCatToQuotation\n\n\ndef make_pdf(empty, data, repairer):\n # Create an A4 portrait (210mm x 297mm) sheet with 2 columns and 8 rows of\n # labels. Each label is 90mm x 25mm with a 2mm rounded corner. The margins are\n # automatically calculated.\n specs = labels.Specification(210, 297, 2, 8, 100, 35, corner_radius=0,\n left_padding=5, top_padding=5, bottom_padding=5, right_padding=5,\n padding_radius=0, top_margin=8, bottom_margin=9, right_margin=5, left_margin=5)\n\n def draw_label(label, width, height, dt):\n # Write the title.\n sn = dt[\"sn\"]\n cat = \"Cat : \" + dt[\"cat\"]\n date = \"Date d'achat : \" + dt[\"date\"]\n snx = \"Sn : \" + sn\n label.add(shapes.String(0, height - 12, snx, fontName=\"Helvetica\", fontSize=15))\n label.add(shapes.String(0 / 4.0, height - 27, cat, fontName=\"Helvetica\", fontSize=15))\n label.add(shapes.String(0 / 4.0, height - 43, date, fontName=\"Helvetica\", fontSize=15))\n EAN = barcode.get_barcode_class('code128')\n ean = EAN(sn, writer=ImageWriter())\n name = ean.save('app/static/barcode/'+dt[\"sn\"])\n\n label.add(shapes.Image(0, -10, width, 35, name))\n\n sheet = labels.Sheet(specs, draw_label, border=True)\n for key, value in empty.items():\n sheet.partial_page(key, value)\n for each in data:\n sheet.add_label(each)\n # sheet.add_label(\"Oversized label here\")\n sn = str(uuid.uuid4())[:8]\n sheet.save('app/static/labels/'+sn+'.pdf')\n Labels.objects.create(file_name=sn+'.pdf', repairer=repairer)\n return sn+'.pdf'\n\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass LogInView(generic.View): # Login View : WORKING\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n token = str(uuid.uuid4())\n data = {'token': token}\n user = authenticate(username=result['username'], password=result['password'])\n if user is not None:\n user = User.objects.get(username=result['username'])\n Token.objects.update_or_create(\n user=user,\n defaults={'token': token},\n )\n # Token.objects.update(token=result['token'], user=user)\n data['success'] = True\n dt = 
{\"firstname\": user.first_name, \"famillyname\": user.last_name, \"id\": user.id, \"mail\": user.email}\n else:\n data['success'] = False\n dt = {\"firstname\": None, \"famillyname\": None, \"id\": None, \"mail\": None}\n data['repairer_infos'] = dt\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass SearchView(generic.View): # Search View\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n users = []\n total = 0\n if TokenManager.IsAdmin(result['username'], result['token']):\n data['success'] = \"true\"\n if result['action'] == 0: # Search by mail\n for customer in Customer.objects.filter(mail__icontains=result['field']).order_by('cp', 'famillyname',\n 'firstname'):\n user = {\"firstname\": customer.firstname, \"famillyname\": customer.famillyname, \"id\": customer.id,\n \"mail\": customer.mail, \"cp\": customer.cp}\n users.append(user)\n\n elif result['action'] == 1: # Search by firstname and famillyname\n for customer in Customer.objects.filter(firstname__icontains=result['firstname'],\n famillyname__icontains=result['famillyname'],\n cp__contains=result['cp']).order_by('cp',\n 'famillyname',\n 'firstname'):\n user = {\"firstname\": customer.firstname, \"famillyname\": customer.famillyname, \"id\": customer.id,\n \"mail\": customer.mail, \"cp\": customer.cp}\n users.append(user)\n elif result['action'] == 2: # Search by Order No : WORKING BUT JSON NOT RIGHT\n dt = {}\n repairs = []\n for repair in CategoryItemPhoneRepair.objects.filter(repair_id=result['field']):\n dt['user'] = {\"firstname\": repair.repair.customer.firstname,\n \"famillyname\": repair.repair.customer.famillyname,\n \"customerid\": repair.repair.customer.id,\n \"customermail\": repair.repair.customer.mail,\n \"customercp\": repair.repair.customer.cp}\n try:\n other = repair.category.others.get().category\n except repair.category.DoesNotExist:\n cat_other = \"\"\n else:\n cat_other = other\n # override prices\n if repair.custom_price == 0:\n price = repair.category.normal_price\n else:\n price = repair.custom_price\n repair = {\"sn\": repair.item.sn,\n 'cat': repair.category.category,\n 'req': cat_other,\n 'price': price}\n repairs.append(repair)\n total = price + total\n users.append(dt)\n data['finalprice'] = total\n users.append(repairs)\n elif result['action'] == 3: # get all customers\n for customer in Customer.objects.all().order_by('cp', 'famillyname', 'firstname'):\n user = {\"firstname\": customer.firstname, \"famillyname\": customer.famillyname, \"id\": customer.id,\n \"mail\": customer.mail, \"cp\": customer.cp}\n users.append(user)\n elif result['action'] == 4: # get by id\n for customer in Customer.objects.filter(id=result['id']).order_by('cp', 'famillyname', 'firstname'):\n user = {\"firstname\": customer.firstname, \"famillyname\": customer.famillyname, \"id\": customer.id,\n \"mail\": customer.mail, \"cp\": customer.cp}\n print(user)\n users.append(user)\n else:\n data['success'] = \"false\"\n\n data['data'] = {'user': users, \"mode\": result['action']}\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass AddStockView(generic.View): # Search View\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n print(str(uuid.uuid4())[:8])\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n users = []\n if 
TokenManager.IsAdmin(result['username'], result['token']):\n items = []\n for item in result['items']:\n sn = str(uuid.uuid4())[:8]\n #cat = Category.objects.get(category=item['cat'])\n #item_object = Item.objects.create(buy_sn=item['buy_sn'], sn=sn,\n # buy_price=float(item['buy_price'].replace(',', '.')))\n #CategoryItemPhoneRepair.objects.create(item=item_object, category=cat)\n items.append({\"sn\": sn, \"cat\": item['cat'], \"date\": str(datetime.now().strftime(\"%d/%m/%Y\"))})\n previous_empty = 1\n i = 0\n dt = []\n full = {}\n print(result['labels'])\n for empty in result['labels']:\n i += 1\n if previous_empty != int(empty['sheet']):\n full[int(previous_empty)] = dt\n dt = [[int(empty['x']), int(empty['y'])]]\n previous_empty = int(empty['sheet'])\n elif int(i) == int(len(result['labels'])):\n dt.append([int(empty['x']), int(empty['y'])])\n full[int(previous_empty)] = dt\n previous_empty = empty['sheet']\n else:\n dt.append([int(empty['x']), int(empty['y'])])\n\n print(full)\n repairer = User.objects.get(username=result['username'])\n pdf_name = make_pdf(full, items, repairer)\n\n data['success'] = True\n data['msg'] = 'Le pdf a été généré, vous pouvez y acceder avec ce [lien](/label/'+pdf_name+') !'\n else:\n data['success'] = False\n data['msg'] = 'Reconnectez vous !'\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass AddOrUpdateCatalogEntryView(generic.View): # Search View\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n if TokenManager.IsAdmin(result['username'], result['token']):\n try:\n if result['other'] != None:\n other = Category.objects.get(id=result['other'])\n else:\n other = None\n catalog_price = result['catalog_price']\n detail = result['detail']\n threshold = result['threshold']\n ioi = result['ioi']\n photo_id = result['photo_id']\n phone = Phone.objects.get(id=result['phone'])\n obj, created = Category.objects.update_or_create(\n category=result['cat'],\n defaults={'normal_price': catalog_price,\n 'detail': detail,\n 'threshold': threshold,\n 'others': other,\n 'phone': phone,\n 'is_on_invoice': ioi,\n 'photo_id': photo_id},\n )\n data['success'] = True\n\n if created is True:\n data['msg'] = 'La référence a été crée'\n else:\n data['msg'] = 'La référence a été modifiée'\n except Category.DoesNotExist:\n data['success'] = False\n data['msg'] = 'Erreur dans la REF catalogue pour \"Other\"'\n except Phone.DoesNotExist:\n data['success'] = False\n data['msg'] = 'Erreur dans l\\'ID du téléphone'\n\n\n else:\n data['success'] = \"false\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass AddPhoneView(generic.View): # Search View\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n if TokenManager.IsAdmin(result['username'], result['token']):\n try:\n Phone.objects.create(model=result['model'], detail=result['caption'])\n data['success'] = \"true\"\n except IntegrityError:\n data['success'] = \"false\"\n else:\n data['success'] = \"false\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass AddressesByCustomerView(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = 
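The loop in `AddStockView.post` above groups already-used label positions by sheet number so `sheet.partial_page()` can skip them, but the manual `previous_empty` bookkeeping is easy to get wrong at group boundaries. A `defaultdict` expresses the same grouping directly; field names `sheet`, `x`, `y` follow the request payload in the snippet:

```python
from collections import defaultdict

used = [{'sheet': '1', 'x': '1', 'y': '1'},
        {'sheet': '1', 'x': '1', 'y': '2'},
        {'sheet': '2', 'x': '2', 'y': '1'}]

by_sheet = defaultdict(list)
for slot in used:
    by_sheet[int(slot['sheet'])].append([int(slot['x']), int(slot['y'])])

print(dict(by_sheet))  # {1: [[1, 1], [1, 2]], 2: [[2, 1]]}
```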
{'token': result['token']}\n users = []\n if TokenManager.IsAdmin(result['username'], result['token']):\n for address in Address.objects.filter(customer_id=result['customerid']):\n user = {\"no\": address.no, \"street\": address.street, \"city\": address.city,\n \"cp\": address.cp, \"country\": address.country, 'label': address.label}\n users.append(user)\n data['success'] = \"true\"\n data['addresses'] = users\n else:\n data['success'] = \"false\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass RepairByCustomerView(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n items = {}\n total = 0\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n for repair in CategoryItemPhoneRepair.objects.filter(repair__customer_id=result['customerid']):\n print(repair)\n print(\"_______\")\n try:\n other = {'cat': repair.category.others.get().category,\n 'photo': repair.category.others.get().photo.file}\n except repair.category.DoesNotExist:\n cat_other = {'none': 'none'}\n else:\n cat_other = other\n # override prices\n if repair.custom_price == 0:\n price = repair.category.normal_price\n else:\n price = repair.custom_price\n dt = {\"sn\": repair.item.sn,\n 'cat': repair.category.category,\n 'req': cat_other,\n 'price': price,\n 'photo': repair.category.photo.file}\n\n items[repair.repair.id] = dt\n total = price + total\n\n data['success'] = \"true\"\n data['cart_content'] = items\n\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetAllStockView(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n\n phone = {}\n total = 0\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n for p in Phone.objects.all().order_by('detail'):\n items = {}\n for c in Category.objects.filter(phone=p).order_by('category'):\n\n cc = CategoryItemPhoneRepair.objects.filter(repair=None, category=c)\n item = []\n if cc.count() != 0:\n for i in cc:\n repair = {\"sn\": i.item.sn,\n 'cat': i.category.category,\n 'req': \"cat_other\",\n 'price': \"price\",\n 'detail': i.category.detail,\n 'photo': i.category.photo.file,\n 'date_buy': i.item.date_buy}\n item.append(repair)\n\n else:\n item = [{\"sn\": \"none\",\n 'cat': \"none\",\n 'req': \"none\",\n 'price': \"none\",\n 'photo': \"none\",\n 'date_buy': None,\n 'detail': None}]\n items[c.category+' : '+c.detail.splitlines()[0]] = item\n phone[p.model]= items\n data['success'] = \"true\"\n data['stock'] = phone\n\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetItemFromCatInStock(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n items = {}\n total = 0\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n for c in Category.objects.filter(category=result['cat']):\n cc = CategoryItemPhoneRepair.objects.filter(repair=None, category=c)\n item = []\n if cc.count() != 0:\n for i in 
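The nested stock loop in `GetAllStockView` issues one query per phone and another per category (a classic N+1 pattern). A sketch of how `select_related` could fetch the same rows in a single joined query; model and field names follow the snippet, and this is untested against the real schema:

```python
from app.models import CategoryItemPhoneRepair

# one query: join item, category and the category's phone in a single SELECT
stock = (CategoryItemPhoneRepair.objects
         .filter(repair=None)
         .select_related('category__phone', 'item'))

by_phone = {}
for cipr in stock:  # iterating triggers no extra queries: relations were joined above
    by_phone.setdefault(cipr.category.phone.model, []).append(cipr.item.sn)
```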
cc:\n if i.category.others is None:\n repair = {\"sn\": i.item.sn,\n 'cat': i.category.category,\n 'req': \"none\",\n 'price': i.category.normal_price,\n 'photo': i.category.photo.file}\n else:\n repair = {\"sn\": i.item.sn,\n 'cat': i.category.category,\n 'req': i.category.others.category,\n 'price': i.category.normal_price,\n 'photo': i.category.photo.file}\n item.append(repair)\n\n else:\n item = [{\"sn\": \"none\",\n 'cat': \"none\",\n 'req': \"none\",\n 'price': \"none\",\n 'photo': \"none\"}]\n items[c.category] = item\n\n data['success'] = \"true\"\n data['stock'] = item\n\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetItemFromRepair(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n items = {}\n total = 0\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n cc = CategoryItemPhoneRepair.objects.filter(repair_id=result['repair_id'])\n item = []\n if cc.count() != 0:\n for i in cc:\n if i.category.others is None:\n repair = {\"sn\": i.item.sn,\n 'cat': i.category.category,\n 'req': \"none\",\n 'price': i.item.sell_price,\n 'photo': i.category.photo.file,\n 'coef': i.item.sell_promo.coef}\n else:\n repair = {\"sn\": i.item.sn,\n 'cat': i.category.category,\n 'req': i.category.others.category,\n 'price': i.item.sell_price,\n 'photo': i.category.photo.file,\n 'coef': i.item.sell_promo.coef}\n item.append(repair)\n\n else:\n item = [{\"sn\": \"none\",\n 'cat': \"none\",\n 'req': \"none\",\n 'price': \"none\",\n 'photo': \"none\"}]\n\n data['success'] = \"true\"\n data['items'] = item\n\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetQuotationDetailView(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n items = {}\n total = 0\n if TokenManager.IsAdmin(result['username'], result['token']):\n qd = QuotationDetail.objects.filter(quotation_id=result['quotation_id'])\n item = []\n if qd.count() != 0:\n for i in qd:\n repair = {'cat': i.category.category,\n\n 'sell_price': i.sell_price,\n 'catalog_price': i.category.normal_price,\n 'photo': i.category.photo.file,\n 'coef': i.sell_promo.coef}\n item.append(repair)\n\n else:\n item = [{\"sn\": \"none\",\n 'cat': \"none\",\n 'req': \"none\",\n 'price': \"none\",\n 'photo': \"none\"}]\n\n data['success'] = \"true\"\n data['catalog_items'] = item\n\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetInfoFromRepair(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n repair = {}\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n\n try:\n r = Repair.objects.get(id=result['repair_id'])\n repair = {'id': r.id,\n 'status': r.status.state}\n except Repair.DoesNotExist:\n repair = {\"id\": \"none\",\n 'status': \"none\"}\n\n data['success'] = \"true\"\n data['repair'] = repair\n\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n 
return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetPaymentFromRepair(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n items = {}\n total = 0\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n cc = RepairPayment.objects.filter(repair_id=result['repair_id'])\n payments = []\n if cc.count() != 0:\n for i in cc:\n payment = {\"pid\": i.id_payment,\n 'payment_date': i.date_paid,\n 'amount': i.amount,\n 'method': i.method.method}\n payments.append(payment)\n\n else:\n payments = [{\"pid\": \"none\",\n 'payment_date': \"none\",\n 'amount': \"none\",\n 'method': \"none\"}]\n\n data['success'] = \"true\"\n data['payments'] = payments\n\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetAllUserInfos(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n user = {}\n total = 0\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n c = Customer.objects.filter(id=result['user_id'])\n for u in c.values():\n\n # pprint(u)\n for k, v in u.items():\n user[k] = v\n try:\n address = getClientAddress(c[0].id)\n user['address'] = {'id':address.id,\n 'no':address.no,\n 'street':address.street,\n 'city':address.city,\n 'cp':address.cp,\n 'country': address.country}\n except:\n user['address'] = \"No address\"\n data['user'] = user\n data['success'] = \"true\"\n\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass LinkItemToRepair(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n total = 0\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n repair = Repair.objects.get(id=result['repair_id'])\n item = Item.objects.get(sn=result['item_sn'].lower())\n\n CategoryItemPhoneRepair.objects.filter(item=item).update(repair=repair)\n promo = Promo.objects.get(id=result['promo_id'])\n item.sell_price = result['final_price']\n item.sell_promo = promo\n item.save()\n data['success'] = \"true\"\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass UnLinkItemToRepair(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n total = 0\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n\n try:\n item = Item.objects.get(sn=result['item_sn'].lower())\n item.sell_price = 0\n item.sell_promo = Promo.objects.get(tag=\"NP\")\n item.save()\n CIPR = CategoryItemPhoneRepair.objects.get(item=item)\n CIPR.repair = None\n CIPR.save()\n data['success'] = \"true\"\n except:\n data['success'] = 'false'\n data['msg'] = 'Erreur de suppression ...'\n\n\n\n else:\n data['success'] = 'false'\n data['msg'] = 'Reconnection necessaire !'\n rt = JsonResponse(data, safe=False)\n return 
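Nearly every POST handler in this file repeats the same plumbing: decode the JSON body, check `TokenManager.IsAdmin`, and wrap the reply in a `JsonResponse`. A hypothetical helper (not in the source) that would centralise that boilerplate, sketched under the assumption that handlers only need the decoded payload:

```python
import json
from django.http import JsonResponse
from app.models import TokenManager

def admin_json_view(handler):
    """Wrap a method(self, result_dict) -> dict with auth + JSON plumbing."""
    def wrapped(self, request, *args, **kwargs):
        result = json.loads(request.body.decode('utf8'))
        data = {'token': result.get('token')}
        if TokenManager.IsAdmin(result.get('username'), result.get('token')):
            data.update(handler(self, result))  # handler returns the payload
            data['success'] = True
        else:
            data['success'] = False
        return JsonResponse(data, safe=False)
    return wrapped
```

Applied as `post = admin_json_view(post_body)` on a `generic.View`, each view body would shrink to the query logic alone.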
rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetAllUserRepairs(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n repairs = []\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n for r in Repair.objects.filter(customer_id=result['user_id']).values():\n repair = {}\n pprint(r)\n for k, v in r.items():\n if k == 'status_id':\n repair['status'] = RepairState.objects.get(id=v).state\n elif k == 'repairer_id':\n usr = User.objects.get(id=v)\n repair['repairer'] = usr.first_name + \" \" + usr.last_name\n else:\n repair[k] = v\n repairs.append(repair)\n data['repairs'] = repairs\n data['success'] = \"true\"\n\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetQuotationListByUser(generic.View):\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n repairs = []\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n for q in Quotation.objects.filter(customer_id=result['client_id']):\n repair = {}\n repair['id'] = q.id\n repair['date_add'] = q.date_add\n repair['date_accepted'] = q.date_accepted\n repair['repairer'] = {'id':q.repairer.id, 'last_name':q.repairer.last_name ,'familly_name': q.repairer.first_name}\n if q.repair is not None:\n repair['repair_id'] = q.repair.id\n repairs.append(repair)\n data['repairs'] = repairs\n data['success'] = \"true\"\n\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\nclass GetLabels(generic.View):\n http_method_names = ['get']\n\n def get(self, request, *args, **kwargs):\n # print(request.GET[\"test\"])\n make_pdf([[1,1],[1,2]], [{\"sn\": \"987654345678\", \"cat\": \"MLJGHJK\", \"date\": \"12/12/12\"},{\"sn\": \"45678765456\", \"cat\": \"LKJHGYUIKN\", \"date\": \"12/4567/12\"}])\n return render(request, \"test.html\")\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass AddItem(generic.View):\n http_method_names = ['get', 'post']\n\n def get(self, request, *args, **kwargs):\n # print(request.GET[\"test\"])\n # make_pdf(\"\", [{\"sn\": \"987654345678\", \"cat\": \"MLJGHJK\", \"date\": \"12/12/12\"},{\"sn\": \"45678765456\", \"cat\": \"LKJHGYUIKN\", \"date\": \"12/4567/12\"}])\n return render(request, \"additemform.html\")\n\n def post(self, request, *args, **kwargs):\n raw = request.body.decode('utf8')\n\n results = raw.split(\"&\")\n full = {}\n for result in results:\n each = result.split(\"=\")\n full[each[0]] = each[1]\n print(full['t'])\n if full['t'] != \"\" and full['t'] != \"\":\n print(\"test\")\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetSpecificCat(generic.View): # Search View\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n cat = []\n result = json.loads(request.body.decode('utf8'))\n for c in Category.objects.filter(category__contains=result['cat']).order_by('category'):\n if c.others is not None:\n cat.append({'id': c.id,\n 'photo': c.photo.file,\n 'detail': c.detail,\n 'cat': c.category,\n 'ioi': c.is_on_invoice,\n 'catalog_price': c.normal_price,\n 'other': {'id': c.others.id,\n 'photo': c.others.photo.file,\n 'detail': c.others.detail,\n 'cat': c.others.category,\n 'ioi': 
c.others.is_on_invoice}\n })\n else:\n cat.append({'id': c.id,\n 'photo': c.photo.file,\n 'detail': c.detail,\n 'cat': c.category,\n 'ioi': c.is_on_invoice,\n 'catalog_price': c.normal_price,\n 'other': {'id': None,\n 'photo': None,\n 'detail': None,\n 'cat': None,\n 'ioi': None}\n })\n return JsonResponse(cat, safe=False)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetAllCat(generic.View): # Search View\n http_method_names = ['get']\n\n def get(self, request, *args, **kwargs):\n data = {'success': True}\n phone = {}\n for p in Phone.objects.all().order_by('detail'):\n cat = []\n for c in Category.objects.filter(phone=p).order_by('category'):\n if c.others is not None:\n cat.append({'id': c.id,\n 'photo': {\n 'file': c.photo.file,\n 'id': c.photo.id\n },\n 'detail': c.detail,\n 'cat': c.category,\n 'ioi': c.is_on_invoice,\n 'catalog_price' : c.normal_price,\n 'threshold': c.threshold,\n 'phone':{\n 'id': c.phone.id,\n 'phone': c.phone.detail\n },\n 'other': {'id': c.others.id,\n 'photo': c.others.photo.file,\n 'detail': c.others.detail,\n 'cat': c.others.category,\n 'ioi': c.others.is_on_invoice}\n })\n else:\n cat.append({'id': c.id,\n 'photo': {\n 'file': c.photo.file,\n 'id': c.photo.id\n },\n 'detail': c.detail,\n 'cat': c.category,\n 'ioi': c.is_on_invoice,\n 'catalog_price' : c.normal_price,\n 'threshold': c.threshold,\n 'phone':{\n 'id': c.phone.id,\n 'phone': c.phone.detail\n },\n 'other': {'id': None,\n 'photo': None,\n 'detail': None,\n 'cat': None,\n 'ioi': None}\n })\n\n phone[p.model] = cat\n data['catalog'] = phone\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetAllPromo(generic.View): # Search View\n http_method_names = ['get']\n\n def get(self, request, *args, **kwargs):\n promos = []\n for p in Promo.objects.all().order_by('-coef'):\n promo = {'id':p.id,\n 'name': p.name,\n 'coef':p.coef}\n promos.append(promo)\n return JsonResponse(promos, safe=False)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetAllPhoneModel(generic.View): # Search View\n http_method_names = ['get']\n\n def get(self, request, *args, **kwargs):\n cat = []\n for p in Phone.objects.all().order_by('detail'):\n cat.append({'id': p.id,\n 'name': p.detail,\n 'model': p.model})\n return JsonResponse(cat, safe=False)\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetAllOtherCatView(generic.View): # Search View\n http_method_names = ['get']\n\n def get(self, request, *args, **kwargs):\n cat = []\n for oc in Category.objects.filter(is_on_invoice=False).order_by('category'):\n cat.append({'id': oc.id,\n 'cat': oc.category,\n 'detail': oc.detail})\n return JsonResponse(cat, safe=False)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetAllPaymentMethod(generic.View): # Search View\n http_method_names = ['get']\n\n def get(self, request, *args, **kwargs):\n cat = []\n for p in PaymentMethod.objects.all().order_by('method'):\n cat.append({'id': p.id,\n 'sm': p.short_method,\n 'method': p.method})\n return JsonResponse(cat, safe=False)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GenerateInvoice(generic.View): # Search View\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n GenericFunctions.InvoiceMaker(repair_id=result['repair_id'], customer_id=result['customer_id'])\n return JsonResponse({'success': \"true\"}, safe=False)\n\n\n@method_decorator(csrf_exempt, 
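`AddItem.post` above parses the urlencoded body by hand with `split("&")` and `split("=")`, which breaks on URL-escaped characters. The standard library already decodes this correctly (as does `request.POST` for form posts); field name `t` follows the snippet:

```python
from urllib.parse import parse_qs

raw = 't=hello%20world&n=3'
# parse_qs returns lists per key; take the first value for simple forms
full = {k: v[0] for k, v in parse_qs(raw).items()}
print(full)  # {'t': 'hello world', 'n': '3'}  -- '%20' is decoded correctly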
name='dispatch')\nclass EditPaymentStatus(generic.View): # Search View\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n GenericFunctions.editPaymentStatus(repair_id=result['repair_id'], customer_id=result['customer_id'])\n return JsonResponse({'success': \"true\"}, safe=False)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass Pay(generic.View): # Pay a repair\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n print(request.body.decode('utf8'))\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n\n items = {}\n total = 0\n print(result['username'], result['token'])\n if TokenManager.IsAdmin(result['username'], result['token']):\n repair = Repair.objects.get(id=result['repair_id'])\n if repair.status.short_state != \"Q\":\n for pm in result['payment_method']:\n if pm['amount'] != '0':\n method = PaymentMethod.objects.get(short_method=pm['sm'])\n try:\n date = datetime.strptime(pm['date_paid'], '%d/%m/%Y').strftime('%Y-%m-%d')\n except:\n date = datetime.now()\n\n payment = RepairPayment.objects.create(method=method, repair=repair, amount=pm['amount'],\n id_payment=str(uuid.uuid4())[:8],\n date_paid=date)\n #GenericFunctions.editPaymentStatus(repair_id=repair.id, customer_id=repair.customer.id)\n #GenericFunctions.InvoiceMaker(repair_id=repair.id, customer_id=repair.customer.id)\n data['msg'] = \"done\"\n else:\n data['msg'] = \"Still in quoattion modify repair type\"\n data['success'] = \"true\"\n\n\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass UnPay(generic.View): # Pay a repair\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n print(request.body.decode('utf8'))\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n if TokenManager.IsAdmin(result['username'], result['token']):\n try:\n RepairPayment.objects.filter(id_payment=result['payment_id']).delete()\n data['success'] = \"true\"\n except:\n data['success'] = \"false\"\n data['msg'] = \"erreur deleting\"\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass NewRepair(generic.View): # create a repair\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n\n if TokenManager.IsAdmin(result['username'], result['token']):\n client_id = result['client_id']\n date_add = result['date_add']\n status = RepairState.objects.get(short_state='Q')\n if date_add == 'today':\n date = datetime.now()\n else:\n date = datetime.strptime(date_add, '%d/%m/%Y').strftime('%Y-%m-%d')\n client = Customer.objects.get(id=client_id)\n Repair.objects.create(customer=client, date_add=date, phone_password=None, date_repaired=None,\n status=status)\n data['success'] = \"true\"\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass newClientView(generic.View): # create a repair\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n\n if TokenManager.IsAdmin(result['username'], result['token']):\n success_newClient, 
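The date handling in `Pay.post` above wraps `datetime.strptime` in a bare `except:`, which also swallows `KeyError` and `TypeError`. A small sketch of the same fallback that catches only the parse failure, so genuine bugs still surface:

```python
from datetime import datetime

def parse_paid_date(raw):
    """Accept 'dd/mm/YYYY' from the client, falling back to today."""
    try:
        return datetime.strptime(raw, '%d/%m/%Y').date()
    except ValueError:          # only malformed dates fall through
        return datetime.now().date()

print(parse_paid_date('31/12/2023'))  # 2023-12-31
print(parse_paid_date('today'))       # falls back to today's date
```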
client = newClient(result['firstname'],result['famillyname'],result['mail'],result['address_cp'])\n print(success_newClient)\n if success_newClient is True:\n success_newPhone, phone = newPhone(result['phone_number'], client)\n if success_newPhone is True:\n success_newAddress, address = newAddress(result['address_no'], result['address_street'], result['address_city'],\n result['address_cp'], result['address_country'], client)\n if success_newAddress is True:\n data['success'] = \"true\"\n data['msg'] = \"Client créé avec succes\"\n else:\n data['success'] = \"false\"\n data['msg'] = \"Verifiez l'adresse\"\n delPhone(phone.id)\n delClient(client.id)\n else:\n data['success'] = \"false\"\n data['msg'] = \"Verifiez le téléphone\"\n delClient(client.id)\n else:\n data['success'] = \"false\"\n data['msg'] = \"Verifiez [Nom, Prenom, Mail, CP]\"\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass newQuotationView(generic.View): # create a repair\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n\n if TokenManager.IsAdmin(result['username'], result['token']):\n client = Customer.objects.get(id=result['client_id'])\n repairer = User.objects.get(username=result['username'])\n success_newQuotation, quotation = newQuotation(client=client, repairer=repairer)\n print(success_newQuotation)\n if success_newQuotation is True:\n data['success'] = \"true\"\n data['msg'] = \"Devis n°\"+ quotation +\" créé\"\n else:\n data['success'] = \"false\"\n data['msg'] = \"Erreur\"\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass AddCatToQuotationView(generic.View): # create a repair\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {'token': result['token']}\n\n if TokenManager.IsAdmin(result['username'], result['token']):\n category = Category.objects.get(category=result['category'])\n quotation = Quotation.objects.get(id=result['quotation_id'])\n success_addCatToQuotation = addCatToQuotation(quotation=quotation, category=category, promo_id=result['promo_id'], sell_price=result['sell_price'])\n print(success_addCatToQuotation)\n if success_addCatToQuotation is True:\n data['success'] = \"true\"\n data['msg'] = \"Cate ajoutée\"\n else:\n data['success'] = \"false\"\n data['msg'] = \"Erreur\"\n else:\n data['success'] = \"no adm\"\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass TestIfLogedView(generic.View): # create a repair\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n result = json.loads(request.body.decode('utf8'))\n data = {}\n username = result.get('username')\n token = result.get('token')\n if username and token:\n if TokenManager.IsAdmin(result['username'], result['token']):\n data['success'] = True\n else:\n data['success'] = False\n else:\n data['success'] = False\n rt = JsonResponse(data, safe=False)\n return rt\n\n\n #TODO delete a repair : unlink the items, delete the payments and DELETE repair itself\n #TODO add msg to json response for every functions\n #TODO create generic functions for multi used functions : unlink item, delete payments ...\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass 
simple_upload(generic.View): # create a repair\n\n http_method_names = ['post', 'get']\n\n def get(self, request, *args, **kwargs):\n with open('app/static/adresses-13.csv') as f:\n reader = csv.reader(f, delimiter=';')\n va=0\n for row in reader:\n # row = row[0].split(';')\n va = va + 1\n print(va)\n print(row)\n _, created = FrenchAddress.objects.get_or_create(\n id_file=row[0],\n numero=row[2],\n rep=row[3],\n nom_voie=row[4],\n code_postal=row[5],\n code_insee=row[6],\n nom_commune=row[7],\n code_insee_ancienne_commune=row[8],\n nom_ancienne_commune=row[9],\n lon=row[12],\n lat=row[13],\n nom_afnor=row[17],\n )\n # creates a tuple of the new object or\n # current object and a boolean of if it was created\n #pprint(FrenchAddress.objects.get(id_file='13215_8670_00014'))\n return render(request, 'up.html')\n def post(self, request, *args, **kwargs):\n if request.FILES['myfile']:\n myfile = request.FILES['myfile']\n result = GenericFunctions.upload_img(myfile)\n if result['success']:\n print(result['file_name'])\n va = 0\n with open('app/static/'+result['file_name']) as f:\n reader = csv.reader(f, delimiter=';')\n for row in reader:\n #row = row[0].split(';')\n va = va + 1\n print(va)\n print(row)\n _, created = FrenchAddress.objects.get_or_create(\n id_file=row[0],\n numero = row[2],\n rep = row[3],\n nom_voie =row[4],\n code_postal =row[5],\n code_insee =row[6],\n nom_commune =row[7],\n code_insee_ancienne_commune =row[8],\n nom_ancienne_commune = row[9],\n lon = row[12],\n lat = row[13],\n nom_afnor = row[17],\n )\n # creates a tuple of the new object or\n # current object and a boolean of if it was created\n return render(request, 'up.html', {\n 'uploaded_file_url': result['file_name']\n })\n else:\n return render(request, 'up.html', {\n 'error': result['msg']\n })\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass ImportPhotosForCatalog(generic.View): # create a repair\n\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n if request.FILES['myfile']:\n myfile = request.FILES['myfile']\n result = GenericFunctions.upload_img(myfile)\n if result['success']:\n print(result['file_name'])\n return render(request, 'up.html', {\n 'uploaded_file_url': result['file_name']\n })\n else:\n return render(request, 'up.html', {\n 'error': result['msg']\n })\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass ImgView(generic.View): # create a repair\n\n http_method_names = ['get']\n\n def get(self, request, file, *args, **kwargs):\n if Photos.objects.filter(file=file).count() == 1:\n image_data = open(\"app/static/img/\" + file, \"rb\").read()\n ext = file[-4:]\n if ext == '.jpg':\n return HttpResponse(image_data, content_type=\"image/jpg\")\n elif ext == '.png':\n return HttpResponse(image_data, content_type=\"image/png\")\n else:\n image_data = open(\"app/static/img/no_img.png\", \"rb\").read()\n return HttpResponse(image_data, content_type=\"image/png\")\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass LabelsView(generic.View): # create a repair\n\n http_method_names = ['get']\n\n def get(self, request, file, *args, **kwargs):\n if Photos.objects.filter(file=file).count() == 1:\n image_data = open(\"app/static/img/\" + file, \"rb\").read()\n ext = file[-4:]\n if ext == '.jpg':\n return HttpResponse(image_data, content_type=\"image/jpg\")\n elif ext == '.png':\n return HttpResponse(image_data, content_type=\"image/png\")\n else:\n pdf_data = open(\"app/static/labels/\" + file, \"rb\").read()\n return HttpResponse(pdf_data, 
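The CSV import in `simple_upload` above calls `FrenchAddress.objects.get_or_create` once per row, i.e. two queries per address, which is very slow for a large BAN-style address file. A sketch of the same import batched with `bulk_create`; column indices follow the snippet, and `ignore_conflicts` assumes a uniqueness constraint on `id_file` (Django 2.2+):

```python
import csv
from app.models import FrenchAddress

def import_addresses(path, batch_size=5000):
    batch = []
    with open(path) as f:
        for row in csv.reader(f, delimiter=';'):
            batch.append(FrenchAddress(
                id_file=row[0], numero=row[2], rep=row[3], nom_voie=row[4],
                code_postal=row[5], code_insee=row[6], nom_commune=row[7],
                code_insee_ancienne_commune=row[8], nom_ancienne_commune=row[9],
                lon=row[12], lat=row[13], nom_afnor=row[17]))
            if len(batch) >= batch_size:
                FrenchAddress.objects.bulk_create(batch, ignore_conflicts=True)
                batch = []
    if batch:  # flush the final partial batch
        FrenchAddress.objects.bulk_create(batch, ignore_conflicts=True)
```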
content_type=\"application/pdf\")\n\n\"\"\"\nViews for the selling website (public views)\n\"\"\"\n\n\"\"\"\nPublic views for getting all the products we sell\n\"\"\"\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetAllCatPublic(generic.View): # Search View\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n print(request.body.decode('utf8'))\n print(GenericFunctions.get_client_ip(request))\n result = json.loads(request.body.decode('utf8'))\n cat = []\n for c in Category.objects.filter(phone__model__contains=result['phone']).order_by('category'):\n if c.is_on_invoice:\n cat.append({'id': c.id,\n 'photo': c.photo.file,\n 'detail': c.detail,\n 'cat': c.category\n })\n return JsonResponse(cat, safe=False)\n\n\"\"\"\nPublic views for getting all the Phones\n\"\"\"\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetAllPhonePublic(generic.View): # Search View\n http_method_names = ['get']\n\n def get(self, request, *args, **kwargs):\n phones = []\n for p in Phone.objects.all().order_by('model'):\n phones.append({'id': p.id,\n 'photo': \"\",\n 'detail': p.detail,\n 'model': p.model\n })\n return JsonResponse(phones, safe=False)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass ajax(generic.View): # Search View\n http_method_names = ['get']\n\n def get(self, request, *args, **kwargs):\n phones = []\n for p in Phone.objects.all().order_by('model'):\n phones.append({'id': p.id,\n 'photo': \"\",\n 'detail': p.detail,\n 'model': p.model\n })\n return JsonResponse(phones, safe=False)", "sub_path": "app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 53739, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "labels.Specification", "line_number": 44, "usage_type": "call"}, {"api_name": "reportlab.graphics.shapes.String", "line_number": 54, "usage_type": "call"}, {"api_name": "reportlab.graphics.shapes", "line_number": 54, "usage_type": "name"}, {"api_name": "reportlab.graphics.shapes.String", "line_number": 55, "usage_type": "call"}, {"api_name": "reportlab.graphics.shapes", "line_number": 55, "usage_type": "name"}, {"api_name": "reportlab.graphics.shapes.String", "line_number": 56, "usage_type": "call"}, {"api_name": "reportlab.graphics.shapes", "line_number": 56, "usage_type": "name"}, {"api_name": "barcode.get_barcode_class", "line_number": 57, "usage_type": "call"}, {"api_name": "barcode.writer.ImageWriter", "line_number": 58, "usage_type": "call"}, {"api_name": "reportlab.graphics.shapes.Image", "line_number": 61, "usage_type": "call"}, {"api_name": "reportlab.graphics.shapes", "line_number": 61, "usage_type": "name"}, {"api_name": "labels.Sheet", "line_number": 63, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 69, "usage_type": "call"}, {"api_name": "app.models.Labels.objects.create", "line_number": 71, "usage_type": "call"}, {"api_name": "app.models.Labels.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "app.models.Labels", "line_number": 71, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 77, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 77, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 81, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 84, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.models.User.objects.get", "line_number": 86, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 86, "usage_type": "name"}, {"api_name": "app.models.Token.objects.update_or_create", "line_number": 87, "usage_type": "call"}, {"api_name": "app.models.Token.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "app.models.Token", "line_number": 87, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 98, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 76, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 76, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 103, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 103, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 107, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 111, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 111, "usage_type": "name"}, {"api_name": "app.models.Customer.objects.filter", "line_number": 114, "usage_type": "call"}, {"api_name": "app.models.Customer.objects", "line_number": 114, "usage_type": "attribute"}, {"api_name": "app.models.Customer", "line_number": 114, "usage_type": "name"}, {"api_name": "app.models.Customer.objects.filter", "line_number": 121, "usage_type": "call"}, {"api_name": "app.models.Customer.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "app.models.Customer", "line_number": 121, "usage_type": "name"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects.filter", "line_number": 132, "usage_type": "call"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "app.models.CategoryItemPhoneRepair", "line_number": 132, "usage_type": "name"}, {"api_name": "app.models.Customer.objects.all", "line_number": 159, "usage_type": "call"}, {"api_name": "app.models.Customer.objects", "line_number": 159, "usage_type": "attribute"}, {"api_name": "app.models.Customer", "line_number": 159, "usage_type": "name"}, {"api_name": "app.models.Customer.objects.filter", "line_number": 164, "usage_type": "call"}, {"api_name": "app.models.Customer.objects", "line_number": 164, "usage_type": "attribute"}, {"api_name": "app.models.Customer", "line_number": 164, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 173, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 102, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 102, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 178, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 178, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 182, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 183, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 186, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 186, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 189, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 194, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 194, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 214, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 214, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 214, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 222, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 177, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 177, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 228, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 228, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 232, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 234, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 234, "usage_type": "name"}, {"api_name": "app.models.Category.objects.get", "line_number": 237, "usage_type": "call"}, {"api_name": "app.models.Category.objects", "line_number": 237, "usage_type": "attribute"}, {"api_name": "app.models.Category", "line_number": 237, "usage_type": "name"}, {"api_name": "app.models.Phone.objects.get", "line_number": 245, "usage_type": "call"}, {"api_name": "app.models.Phone.objects", "line_number": 245, "usage_type": "attribute"}, {"api_name": "app.models.Phone", "line_number": 245, "usage_type": "name"}, {"api_name": "app.models.Category.objects.update_or_create", "line_number": 246, "usage_type": "call"}, {"api_name": "app.models.Category.objects", "line_number": 246, "usage_type": "attribute"}, {"api_name": "app.models.Category", "line_number": 246, "usage_type": "name"}, {"api_name": "app.models.Category.DoesNotExist", "line_number": 262, "usage_type": "attribute"}, {"api_name": "app.models.Category", "line_number": 262, "usage_type": "name"}, {"api_name": "app.models.Phone.DoesNotExist", "line_number": 265, "usage_type": "attribute"}, {"api_name": "app.models.Phone", "line_number": 265, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 272, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 227, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 227, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 277, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 277, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 281, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 283, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 283, "usage_type": "name"}, {"api_name": "app.models.Phone.objects.create", "line_number": 285, "usage_type": "call"}, {"api_name": "app.models.Phone.objects", "line_number": 285, "usage_type": "attribute"}, {"api_name": "app.models.Phone", "line_number": 285, "usage_type": "name"}, {"api_name": "django.db.IntegrityError", "line_number": 287, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 291, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 276, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 276, "usage_type": "argument"}, {"api_name": 
"django.views.generic.View", "line_number": 296, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 296, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 300, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 303, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 303, "usage_type": "name"}, {"api_name": "app.models.Address.objects.filter", "line_number": 304, "usage_type": "call"}, {"api_name": "app.models.Address.objects", "line_number": 304, "usage_type": "attribute"}, {"api_name": "app.models.Address", "line_number": 304, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 312, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 295, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 295, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 317, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 317, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 321, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 326, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 326, "usage_type": "name"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects.filter", "line_number": 327, "usage_type": "call"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects", "line_number": 327, "usage_type": "attribute"}, {"api_name": "app.models.CategoryItemPhoneRepair", "line_number": 327, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 356, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 316, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 316, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 361, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 361, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 365, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 371, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 371, "usage_type": "name"}, {"api_name": "app.models.Phone.objects.all", "line_number": 372, "usage_type": "call"}, {"api_name": "app.models.Phone.objects", "line_number": 372, "usage_type": "attribute"}, {"api_name": "app.models.Phone", "line_number": 372, "usage_type": "name"}, {"api_name": "app.models.Category.objects.filter", "line_number": 374, "usage_type": "call"}, {"api_name": "app.models.Category.objects", "line_number": 374, "usage_type": "attribute"}, {"api_name": "app.models.Category", "line_number": 374, "usage_type": "name"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects.filter", "line_number": 376, "usage_type": "call"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects", "line_number": 376, "usage_type": "attribute"}, {"api_name": "app.models.CategoryItemPhoneRepair", "line_number": 376, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 404, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 360, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 360, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 409, 
"usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 409, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 413, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 418, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 418, "usage_type": "name"}, {"api_name": "app.models.Category.objects.filter", "line_number": 419, "usage_type": "call"}, {"api_name": "app.models.Category.objects", "line_number": 419, "usage_type": "attribute"}, {"api_name": "app.models.Category", "line_number": 419, "usage_type": "name"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects.filter", "line_number": 420, "usage_type": "call"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects", "line_number": 420, "usage_type": "attribute"}, {"api_name": "app.models.CategoryItemPhoneRepair", "line_number": 420, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 451, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 408, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 408, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 456, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 456, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 460, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 465, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 465, "usage_type": "name"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects.filter", "line_number": 466, "usage_type": "call"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects", "line_number": 466, "usage_type": "attribute"}, {"api_name": "app.models.CategoryItemPhoneRepair", "line_number": 466, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 498, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 455, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 455, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 503, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 503, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 507, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 511, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 511, "usage_type": "name"}, {"api_name": "app.models.QuotationDetail.objects.filter", "line_number": 512, "usage_type": "call"}, {"api_name": "app.models.QuotationDetail.objects", "line_number": 512, "usage_type": "attribute"}, {"api_name": "app.models.QuotationDetail", "line_number": 512, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 536, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 502, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 502, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 541, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 541, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 545, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 549, "usage_type": "call"}, 
{"api_name": "app.models.TokenManager", "line_number": 549, "usage_type": "name"}, {"api_name": "app.models.Repair.objects.get", "line_number": 552, "usage_type": "call"}, {"api_name": "app.models.Repair.objects", "line_number": 552, "usage_type": "attribute"}, {"api_name": "app.models.Repair", "line_number": 552, "usage_type": "name"}, {"api_name": "app.models.Repair.DoesNotExist", "line_number": 555, "usage_type": "attribute"}, {"api_name": "app.models.Repair", "line_number": 555, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 564, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 540, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 540, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 569, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 569, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 573, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 578, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 578, "usage_type": "name"}, {"api_name": "app.models.RepairPayment.objects.filter", "line_number": 579, "usage_type": "call"}, {"api_name": "app.models.RepairPayment.objects", "line_number": 579, "usage_type": "attribute"}, {"api_name": "app.models.RepairPayment", "line_number": 579, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 600, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 568, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 568, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 605, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 605, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 609, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 614, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 614, "usage_type": "name"}, {"api_name": "app.models.Customer.objects.filter", "line_number": 615, "usage_type": "call"}, {"api_name": "app.models.Customer.objects", "line_number": 615, "usage_type": "attribute"}, {"api_name": "app.models.Customer", "line_number": 615, "usage_type": "name"}, {"api_name": "app.venv.GenericFunctions.getClientAddress", "line_number": 622, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 636, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 604, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 604, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 641, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 641, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 645, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 649, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 649, "usage_type": "name"}, {"api_name": "app.models.Repair.objects.get", "line_number": 650, "usage_type": "call"}, {"api_name": "app.models.Repair.objects", "line_number": 650, "usage_type": "attribute"}, {"api_name": "app.models.Repair", "line_number": 650, "usage_type": "name"}, {"api_name": 
"app.models.Item.objects.get", "line_number": 651, "usage_type": "call"}, {"api_name": "app.models.Item.objects", "line_number": 651, "usage_type": "attribute"}, {"api_name": "app.models.Item", "line_number": 651, "usage_type": "name"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects.filter", "line_number": 653, "usage_type": "call"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects", "line_number": 653, "usage_type": "attribute"}, {"api_name": "app.models.CategoryItemPhoneRepair", "line_number": 653, "usage_type": "name"}, {"api_name": "app.models.Promo.objects.get", "line_number": 654, "usage_type": "call"}, {"api_name": "app.models.Promo.objects", "line_number": 654, "usage_type": "attribute"}, {"api_name": "app.models.Promo", "line_number": 654, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 661, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 640, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 640, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 666, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 666, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 670, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 674, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 674, "usage_type": "name"}, {"api_name": "app.models.Item.objects.get", "line_number": 677, "usage_type": "call"}, {"api_name": "app.models.Item.objects", "line_number": 677, "usage_type": "attribute"}, {"api_name": "app.models.Item", "line_number": 677, "usage_type": "name"}, {"api_name": "app.models.Promo.objects.get", "line_number": 679, "usage_type": "call"}, {"api_name": "app.models.Promo.objects", "line_number": 679, "usage_type": "attribute"}, {"api_name": "app.models.Promo", "line_number": 679, "usage_type": "name"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects.get", "line_number": 681, "usage_type": "call"}, {"api_name": "app.models.CategoryItemPhoneRepair.objects", "line_number": 681, "usage_type": "attribute"}, {"api_name": "app.models.CategoryItemPhoneRepair", "line_number": 681, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 694, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 665, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 665, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 699, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 699, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 703, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 707, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 707, "usage_type": "name"}, {"api_name": "app.models.Repair.objects.filter", "line_number": 708, "usage_type": "call"}, {"api_name": "app.models.Repair.objects", "line_number": 708, "usage_type": "attribute"}, {"api_name": "app.models.Repair", "line_number": 708, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 710, "usage_type": "call"}, {"api_name": "app.models.RepairState.objects.get", "line_number": 713, "usage_type": "call"}, {"api_name": "app.models.RepairState.objects", "line_number": 713, "usage_type": "attribute"}, {"api_name": 
"app.models.RepairState", "line_number": 713, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 715, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 715, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 715, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 725, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 698, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 698, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 730, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 730, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 734, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 738, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 738, "usage_type": "name"}, {"api_name": "app.models.Quotation.objects.filter", "line_number": 739, "usage_type": "call"}, {"api_name": "app.models.Quotation.objects", "line_number": 739, "usage_type": "attribute"}, {"api_name": "app.models.Quotation", "line_number": 739, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 753, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 729, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 729, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 757, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 757, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 763, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 767, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 767, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 773, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 766, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 766, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 789, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 789, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 794, "usage_type": "call"}, {"api_name": "app.models.Category.objects.filter", "line_number": 795, "usage_type": "call"}, {"api_name": "app.models.Category.objects", "line_number": 795, "usage_type": "attribute"}, {"api_name": "app.models.Category", "line_number": 795, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 822, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 788, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 788, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 826, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 826, "usage_type": "name"}, {"api_name": "app.models.Phone.objects.all", "line_number": 832, "usage_type": "call"}, {"api_name": "app.models.Phone.objects", "line_number": 832, "usage_type": "attribute"}, {"api_name": "app.models.Phone", "line_number": 832, "usage_type": "name"}, 
{"api_name": "app.models.Category.objects.filter", "line_number": 834, "usage_type": "call"}, {"api_name": "app.models.Category.objects", "line_number": 834, "usage_type": "attribute"}, {"api_name": "app.models.Category", "line_number": 834, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 880, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 825, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 825, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 885, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 885, "usage_type": "name"}, {"api_name": "app.models.Promo.objects.all", "line_number": 890, "usage_type": "call"}, {"api_name": "app.models.Promo.objects", "line_number": 890, "usage_type": "attribute"}, {"api_name": "app.models.Promo", "line_number": 890, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 895, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 884, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 884, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 899, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 899, "usage_type": "name"}, {"api_name": "app.models.Phone.objects.all", "line_number": 904, "usage_type": "call"}, {"api_name": "app.models.Phone.objects", "line_number": 904, "usage_type": "attribute"}, {"api_name": "app.models.Phone", "line_number": 904, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 908, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 898, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 898, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 911, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 911, "usage_type": "name"}, {"api_name": "app.models.Category.objects.filter", "line_number": 916, "usage_type": "call"}, {"api_name": "app.models.Category.objects", "line_number": 916, "usage_type": "attribute"}, {"api_name": "app.models.Category", "line_number": 916, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 920, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 910, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 910, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 924, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 924, "usage_type": "name"}, {"api_name": "app.models.PaymentMethod.objects.all", "line_number": 929, "usage_type": "call"}, {"api_name": "app.models.PaymentMethod.objects", "line_number": 929, "usage_type": "attribute"}, {"api_name": "app.models.PaymentMethod", "line_number": 929, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 933, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 923, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 923, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 937, "usage_type": "attribute"}, {"api_name": 
"django.views.generic", "line_number": 937, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 941, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions.InvoiceMaker", "line_number": 943, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions", "line_number": 943, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 944, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 936, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 936, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 948, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 948, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 952, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions.editPaymentStatus", "line_number": 954, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions", "line_number": 954, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 955, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 947, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 947, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 959, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 959, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 964, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 970, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 970, "usage_type": "name"}, {"api_name": "app.models.Repair.objects.get", "line_number": 971, "usage_type": "call"}, {"api_name": "app.models.Repair.objects", "line_number": 971, "usage_type": "attribute"}, {"api_name": "app.models.Repair", "line_number": 971, "usage_type": "name"}, {"api_name": "app.models.PaymentMethod.objects.get", "line_number": 975, "usage_type": "call"}, {"api_name": "app.models.PaymentMethod.objects", "line_number": 975, "usage_type": "attribute"}, {"api_name": "app.models.PaymentMethod", "line_number": 975, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 977, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 977, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 979, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 979, "usage_type": "name"}, {"api_name": "app.models.RepairPayment.objects.create", "line_number": 981, "usage_type": "call"}, {"api_name": "app.models.RepairPayment.objects", "line_number": 981, "usage_type": "attribute"}, {"api_name": "app.models.RepairPayment", "line_number": 981, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 982, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 994, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 958, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 958, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 999, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 999, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1004, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 1006, "usage_type": 
"call"}, {"api_name": "app.models.TokenManager", "line_number": 1006, "usage_type": "name"}, {"api_name": "app.models.RepairPayment.objects.filter", "line_number": 1008, "usage_type": "call"}, {"api_name": "app.models.RepairPayment.objects", "line_number": 1008, "usage_type": "attribute"}, {"api_name": "app.models.RepairPayment", "line_number": 1008, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 1015, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 998, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 998, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1020, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1020, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1024, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 1027, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 1027, "usage_type": "name"}, {"api_name": "app.models.RepairState.objects.get", "line_number": 1030, "usage_type": "call"}, {"api_name": "app.models.RepairState.objects", "line_number": 1030, "usage_type": "attribute"}, {"api_name": "app.models.RepairState", "line_number": 1030, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1032, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1032, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 1034, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1034, "usage_type": "name"}, {"api_name": "app.models.Customer.objects.get", "line_number": 1035, "usage_type": "call"}, {"api_name": "app.models.Customer.objects", "line_number": 1035, "usage_type": "attribute"}, {"api_name": "app.models.Customer", "line_number": 1035, "usage_type": "name"}, {"api_name": "app.models.Repair.objects.create", "line_number": 1036, "usage_type": "call"}, {"api_name": "app.models.Repair.objects", "line_number": 1036, "usage_type": "attribute"}, {"api_name": "app.models.Repair", "line_number": 1036, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 1041, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1019, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1019, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1047, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1047, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1051, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 1054, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 1054, "usage_type": "name"}, {"api_name": "app.venv.GenericFunctions.newClient", "line_number": 1055, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions.newPhone", "line_number": 1058, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions.newAddress", "line_number": 1060, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions.delPhone", "line_number": 1068, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions.delClient", "line_number": 1069, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions.delClient", "line_number": 1073, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 
1079, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1046, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1046, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1084, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1084, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1088, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 1091, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 1091, "usage_type": "name"}, {"api_name": "app.models.Customer.objects.get", "line_number": 1092, "usage_type": "call"}, {"api_name": "app.models.Customer.objects", "line_number": 1092, "usage_type": "attribute"}, {"api_name": "app.models.Customer", "line_number": 1092, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 1093, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 1093, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 1093, "usage_type": "name"}, {"api_name": "app.venv.GenericFunctions.newQuotation", "line_number": 1094, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 1104, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1083, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1083, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1109, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1109, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1113, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 1116, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 1116, "usage_type": "name"}, {"api_name": "app.models.Category.objects.get", "line_number": 1117, "usage_type": "call"}, {"api_name": "app.models.Category.objects", "line_number": 1117, "usage_type": "attribute"}, {"api_name": "app.models.Category", "line_number": 1117, "usage_type": "name"}, {"api_name": "app.models.Quotation.objects.get", "line_number": 1118, "usage_type": "call"}, {"api_name": "app.models.Quotation.objects", "line_number": 1118, "usage_type": "attribute"}, {"api_name": "app.models.Quotation", "line_number": 1118, "usage_type": "name"}, {"api_name": "app.venv.GenericFunctions.addCatToQuotation", "line_number": 1119, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 1129, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1108, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1108, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1134, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1134, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1138, "usage_type": "call"}, {"api_name": "app.models.TokenManager.IsAdmin", "line_number": 1143, "usage_type": "call"}, {"api_name": "app.models.TokenManager", "line_number": 1143, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 1149, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1133, "usage_type": 
"call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1133, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1159, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1159, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 1165, "usage_type": "call"}, {"api_name": "app.models.FrenchAddress.objects.get_or_create", "line_number": 1172, "usage_type": "call"}, {"api_name": "app.models.FrenchAddress.objects", "line_number": 1172, "usage_type": "attribute"}, {"api_name": "app.models.FrenchAddress", "line_number": 1172, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 1189, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions.upload_img", "line_number": 1193, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions", "line_number": 1193, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 1198, "usage_type": "call"}, {"api_name": "app.models.FrenchAddress.objects.get_or_create", "line_number": 1204, "usage_type": "call"}, {"api_name": "app.models.FrenchAddress.objects", "line_number": 1204, "usage_type": "attribute"}, {"api_name": "app.models.FrenchAddress", "line_number": 1204, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 1220, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 1224, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1158, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1158, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1230, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1230, "usage_type": "name"}, {"api_name": "app.venv.GenericFunctions.upload_img", "line_number": 1237, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions", "line_number": 1237, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 1240, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 1244, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1229, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1229, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1250, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1250, "usage_type": "name"}, {"api_name": "app.models.Photos.objects.filter", "line_number": 1255, "usage_type": "call"}, {"api_name": "app.models.Photos.objects", "line_number": 1255, "usage_type": "attribute"}, {"api_name": "app.models.Photos", "line_number": 1255, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 1259, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1261, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1264, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1249, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1249, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1268, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1268, "usage_type": "name"}, {"api_name": "app.models.Photos.objects.filter", "line_number": 1273, "usage_type": "call"}, {"api_name": 
"app.models.Photos.objects", "line_number": 1273, "usage_type": "attribute"}, {"api_name": "app.models.Photos", "line_number": 1273, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 1277, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1279, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1282, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1267, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1267, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1292, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1292, "usage_type": "name"}, {"api_name": "app.venv.GenericFunctions.get_client_ip", "line_number": 1297, "usage_type": "call"}, {"api_name": "app.venv.GenericFunctions", "line_number": 1297, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1298, "usage_type": "call"}, {"api_name": "app.models.Category.objects.filter", "line_number": 1300, "usage_type": "call"}, {"api_name": "app.models.Category.objects", "line_number": 1300, "usage_type": "attribute"}, {"api_name": "app.models.Category", "line_number": 1300, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 1307, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1291, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1291, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1313, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1313, "usage_type": "name"}, {"api_name": "app.models.Phone.objects.all", "line_number": 1318, "usage_type": "call"}, {"api_name": "app.models.Phone.objects", "line_number": 1318, "usage_type": "attribute"}, {"api_name": "app.models.Phone", "line_number": 1318, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 1324, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1312, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1312, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 1328, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 1328, "usage_type": "name"}, {"api_name": "app.models.Phone.objects.all", "line_number": 1333, "usage_type": "call"}, {"api_name": "app.models.Phone.objects", "line_number": 1333, "usage_type": "attribute"}, {"api_name": "app.models.Phone", "line_number": 1333, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 1339, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1327, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1327, "usage_type": "argument"}]} +{"seq_id": "154219261", "text": "import os\nimport shutil\nimport sys\nfrom datetime import datetime\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\n\nfrom .backend import (TinyYoloFeature, FullYoloFeature, MobileNetFeature, SqueezeNetFeature, Inception3Feature,\n VGG16Feature, ResNet50Feature, BaseFeatureExtractor)\n\n\nclass BoundBox:\n def __init__(self, xmin, ymin, xmax, ymax, c=None, classes=None):\n self.xmin = xmin\n self.ymin = ymin\n self.xmax = xmax\n self.ymax = ymax\n\n self.c = 
c\n self.classes = classes\n\n self.label = -1\n self.score = -1\n\n def get_label(self):\n if self.label == -1:\n self.label = np.argmax(self.classes)\n\n return self.label\n\n def get_score(self):\n if self.score == -1:\n self.score = self.classes[self.get_label()]\n return self.score\n\n def __repr__(self):\n \"\"\"\n Helper method for printing the object's values\n :return:\n \"\"\"\n return \"<BoundBox xmin={}, xmax={}, ymin={}, ymax={}, label={}, score={}>\\n\".format(\n self.xmin,\n self.xmax,\n self.ymin,\n self.ymax,\n self.get_label(),\n self.get_score()\n )\n\n\nclass WeightReader:\n def __init__(self, weight_file):\n self.offset = 4\n self.all_weights = np.fromfile(weight_file, dtype='float32')\n\n def read_bytes(self, size):\n self.offset = self.offset + size\n return self.all_weights[self.offset - size:self.offset]\n\n def reset(self):\n self.offset = 4\n\n\ndef bbox_iou(box1, box2):\n intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])\n intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])\n\n intersect = intersect_w * intersect_h\n\n w1, h1 = box1.xmax - box1.xmin, box1.ymax - box1.ymin\n w2, h2 = box2.xmax - box2.xmin, box2.ymax - box2.ymin\n\n union = w1 * h1 + w2 * h2 - intersect\n\n return float(intersect) / union\n\n\ndef draw_boxes(image, boxes, labels):\n image_h, image_w, _ = image.shape\n\n color_levels = [0, 255, 128, 64, 32]\n colors = []\n for r in color_levels:\n for g in color_levels:\n for b in color_levels:\n if r == g and r == b: # prevent grayscale colors\n continue\n colors.append((b, g, r))\n\n for box in boxes:\n xmin = int(box.xmin * image_w)\n ymin = int(box.ymin * image_h)\n xmax = int(box.xmax * image_w)\n ymax = int(box.ymax * image_h)\n\n line_width_factor = int(min(image_h, image_w) * 0.005)\n cv2.rectangle(image, (xmin, ymin), (xmax, ymax), colors[box.get_label()], line_width_factor * 2)\n cv2.putText(image, \"{} {:.3f}\".format(labels[box.get_label()], box.get_score()),\n (xmin, ymin - line_width_factor * 3), cv2.FONT_HERSHEY_PLAIN, 2e-3 * min(image_h, image_w),\n (0, 255, 0), line_width_factor)\n\n return image\n\n\ndef decode_netout(netout, anchors, nb_class, obj_threshold=0.5, nms_threshold=0.3):\n grid_h, grid_w, nb_box = netout.shape[:3]\n\n boxes = []\n\n # decode the output by the network\n netout[..., 4] = _sigmoid(netout[..., 4])\n netout[..., 5:] = _softmax(netout[..., 5:])\n\n for row in range(grid_h):\n for col in range(grid_w):\n for b in range(nb_box):\n # from 4th element onwards are confidence and class classes\n classes = netout[row, col, b, 5:]\n confidence = netout[row, col, b, 4]\n\n if confidence >= obj_threshold:\n # first 4 elements are x, y, w, and h\n x, y, w, h = netout[row, col, b, :4]\n\n x = (col + _sigmoid(x)) / grid_w # center position, unit: image width\n y = (row + _sigmoid(y)) / grid_h # center position, unit: image height\n w = anchors[2 * b + 0] * np.exp(w) / grid_w # unit: image width\n h = anchors[2 * b + 1] * np.exp(h) / grid_h # unit: image height\n\n box = BoundBox(x - w / 2, y - h / 2, x + w / 2, y + h / 2, confidence, classes)\n boxes.append(box)\n\n # suppress non-maximal boxes\n for c in range(nb_class):\n sorted_indices = list(reversed(np.argsort([box.classes[c] for box in boxes])))\n\n for i in range(len(sorted_indices)):\n index_i = sorted_indices[i]\n\n if boxes[index_i].classes[c] == 0:\n continue\n else:\n for j in range(i + 1, len(sorted_indices)):\n index_j = sorted_indices[j]\n\n if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_threshold:\n boxes[index_i].classes[c] = 0\n\n # remove the boxes which 
are less likely than an obj_threshold\n boxes = [box for box in boxes if box.get_score() > obj_threshold]\n\n return boxes\n\n\ndef compute_overlap(a, b):\n \"\"\"\n Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n Parameters\n ----------\n a: (N, 4) ndarray of float\n b: (K, 4) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n \"\"\"\n area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])\n\n iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])\n ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])\n\n iw = np.maximum(iw, 0)\n ih = np.maximum(ih, 0)\n\n ua = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - iw * ih\n\n ua = np.maximum(ua, np.finfo(float).eps)\n\n intersection = iw * ih\n\n return intersection / ua\n\n\ndef compute_ap(recall, precision):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n\n # Arguments\n recall: The recall curve (list).\n precision: The precision curve (list).\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], recall, [1.]))\n mpre = np.concatenate(([0.], precision, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef _interval_overlap(interval_a, interval_b):\n x1, x2 = interval_a\n x3, x4 = interval_b\n\n if x3 < x1:\n if x4 < x1:\n return 0\n else:\n return min(x2, x4) - x1\n else:\n if x2 < x3:\n return 0\n else:\n return min(x2, x4) - x3\n\n\ndef _sigmoid(x):\n return 1. / (1. 
+ np.exp(-x))\n\n\ndef _softmax(x, axis=-1, t=-100.):\n x = x - np.max(x)\n\n if np.min(x) < t:\n x = x / np.min(x) * t\n\n e_x = np.exp(x)\n\n return e_x / e_x.sum(axis, keepdims=True)\n\n\ndef import_dynamically(name):\n components = name.split('.')\n mod = __import__(components[0])\n for comp in components[1:]:\n mod = getattr(mod, comp)\n return mod\n\n\ndef import_feature_extractor(backend, input_size):\n if backend == 'Inception3':\n feature_extractor = Inception3Feature(input_size)\n elif backend == 'SqueezeNet':\n feature_extractor = SqueezeNetFeature(input_size)\n elif backend == 'MobileNet':\n feature_extractor = MobileNetFeature(input_size)\n elif backend == 'Full Yolo':\n feature_extractor = FullYoloFeature(input_size)\n elif backend == 'Tiny Yolo':\n feature_extractor = TinyYoloFeature(input_size)\n elif backend == 'VGG16':\n feature_extractor = VGG16Feature(input_size)\n elif backend == 'ResNet50':\n feature_extractor = ResNet50Feature(input_size)\n elif os.path.dirname(backend) != \"\":\n base_path = os.path.dirname(backend)\n sys.path.append(base_path)\n custom_backend_name = os.path.basename(backend)\n custom_backend = import_dynamically(custom_backend_name)\n feature_extractor = custom_backend(input_size)\n if not issubclass(custom_backend, BaseFeatureExtractor):\n raise RuntimeError('You are trying to import a custom backend, your backend must be inherited from '\n ' \"backend.BaseFeatureExtractor\".')\n print('Using a custom backend called {}.'.format(custom_backend_name))\n else:\n raise RuntimeError('Architecture not supported! Only support Full Yolo, Tiny Yolo, MobileNet, SqueezeNet, VGG16'\n ', ResNet50, or Inception3 at the moment!')\n\n return feature_extractor\n\n\n# these functions are from imutils, you can check this library here: https://github.com/jrosebr1/imutils\n# just added this function to have less dependencies\ndef list_images(base_path, valid_exts=(\".jpg\", \".jpeg\", \".png\", \".bmp\", \".tif\", \".tiff\"), contains=None):\n # return the set of files that are valid\n return list_files(base_path, valid_exts, contains=contains)\n\n\ndef list_files(base_path, valid_exts=\"\", contains=None):\n # loop over the directory structure\n for (rootDir, dirNames, filenames) in os.walk(base_path):\n # loop over the filenames in the current directory\n for filename in filenames:\n # if the contains string is not none and the filename does not contain\n # the supplied string, then ignore the file\n if contains is not None and filename.find(contains) == -1:\n continue\n\n # determine the file extension of the current file\n ext = filename[filename.rfind(\".\"):].lower()\n\n # check to see if the file is an image and should be processed\n if ext.endswith(valid_exts):\n # construct the path to the image and yield it\n image_path = os.path.join(rootDir, filename)\n yield image_path\n\n\ndef enable_memory_growth():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n\n\ndef create_backup(config):\n # TODO: the backup creation should be improved, because if the keras_yolo is used as module and the train.py and\n # config.json is in another folder, it won't be 
copied to the backup\n\n backup_folder = config['backup']['backup_path']\n prefix = config['backup']['backup_prefix']\n backup_id = datetime.now().strftime('%Y%m%d%H%M%S')\n train_folder_name = \"_\".join([prefix, backup_id])\n path = os.path.join(backup_folder, train_folder_name)\n if os.path.isdir(path):\n shutil.rmtree(path)\n os.makedirs(path)\n\n shutil.copytree(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"..\"), os.path.join(path, \"Keras-yolo2\"),\n ignore=shutil.ignore_patterns(\".git\"))\n readme_message = \"\"\n while readme_message == \"\":\n readme_message = input(\"Insert a comment about this training: \")\n with open(os.path.join(path, \"readme.txt\"), 'w') as readme_file:\n readme_file.write(readme_message)\n\n if config['backup']['redirect_model']:\n model_name = \".\".join([train_folder_name, \"h5\"])\n model_name = os.path.join(path, model_name)\n log_name = os.path.join(path, \"logs\")\n print('\\n\\nRedirecting {} file name to {}.'.format(config['train']['saved_weights_name'], model_name))\n print('Redirecting {} tensorboard log to {}.'.format(config['train']['tensorboard_log_dir'], log_name))\n config['train']['saved_weights_name'] = model_name\n config['train']['tensorboard_log_dir'] = log_name\n\n return config\n", "sub_path": "keras_yolov2/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 12306, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.argmax", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.fromfile", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 99, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_PLAIN", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.finfo", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 237, "usage_type": "call"}, {"api_name": 
"numpy.exp", "line_number": 239, "usage_type": "call"}, {"api_name": "backend.Inception3Feature", "line_number": 254, "usage_type": "call"}, {"api_name": "backend.SqueezeNetFeature", "line_number": 256, "usage_type": "call"}, {"api_name": "backend.MobileNetFeature", "line_number": 258, "usage_type": "call"}, {"api_name": "backend.FullYoloFeature", "line_number": 260, "usage_type": "call"}, {"api_name": "backend.TinyYoloFeature", "line_number": 262, "usage_type": "call"}, {"api_name": "backend.VGG16Feature", "line_number": 264, "usage_type": "call"}, {"api_name": "backend.ResNet50Feature", "line_number": 266, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 268, "usage_type": "call"}, {"api_name": "os.path", "line_number": 268, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 269, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 269, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path", "line_number": 270, "usage_type": "attribute"}, {"api_name": "backend.BaseFeatureExtractor", "line_number": 273, "usage_type": "argument"}, {"api_name": "os.walk", "line_number": 293, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 307, "usage_type": "call"}, {"api_name": "os.path", "line_number": 307, "usage_type": "attribute"}, {"api_name": "tensorflow.config.experimental.list_physical_devices", "line_number": 312, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 312, "usage_type": "attribute"}, {"api_name": "tensorflow.config.experimental.set_memory_growth", "line_number": 317, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 317, "usage_type": "attribute"}, {"api_name": "tensorflow.config.experimental.list_logical_devices", "line_number": 318, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 318, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 331, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 331, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 333, "usage_type": "call"}, {"api_name": "os.path", "line_number": 333, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 334, "usage_type": "call"}, {"api_name": "os.path", "line_number": 334, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 335, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 336, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path", "line_number": 338, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 338, "usage_type": "call"}, {"api_name": "shutil.ignore_patterns", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 343, "usage_type": "call"}, {"api_name": "os.path", "line_number": 343, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path", "line_number": 348, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 349, "usage_type": "call"}, {"api_name": "os.path", "line_number": 
349, "usage_type": "attribute"}]} +{"seq_id": "294296487", "text": "# External\nfrom typing import Optional,Callable,Any,List,Union,Dict\nfrom pprint import pformat\nfrom traceback import format_exc\n\n# Internal\nfrom dbgen.support.func import Func\nfrom dbgen.support.misc import ConnectInfo,ExternalError\nfrom dbgen.core.sql import mkInsCmd,sqlexecute\nfrom dbgen.core.graphs import DiGraph,topsort_with_dict\nfrom dbgen.core.misc import identity,abbreviate\nfrom dbgen.core.lists import merge_dicts\nfrom dbgen.core.misc import hash_\n####################################################################################\n# Type Synonyms\nFuncLikeArg = Optional[Union[Callable,List['PyBlock']]]\n\n\nclass Arg(object):\n \"\"\"\n How a function refers to a namespace\n \"\"\"\n\n def __init__(self,name : str,ind : Optional[int] = None) -> None:\n self.name = name\n self.ind = ind\n\n def hash_(self)->str:\n return hash_((self.name,self.ind))\n\n def src(self)->str:\n ind = '' if self.ind is None else ',%s'%self.ind\n return \"Arg('%s'%s)\"%(self.name,ind)\n\n def arg_get(self,dic:dict)->Any:\n try:\n val = dic[self.name]\n\n if (self.ind is None) or (self.ind == 0 and (not isinstance(val,tuple))):\n return val\n else:\n try:\n return val[self.ind]\n except IndexError:\n dic_ = {k:abbreviate(v) for k,v in dic.items()}\n args = [self.name,self.ind,val,pformat(dic_)]\n msg = 'cannot index into {0} with # {1} in {2} \ndic = {3}'\n raise IndexError(msg.format(*args))\n except TypeError:\n raise TypeError('cannot index into %s: '%val.__class__,val)\n except KeyError:\n print('could not find %s in '%self.name,list(dic.keys()))\n raise KeyError('could not find %s in '%self.name,list(dic.keys()))\n\n def __str__(self)->str:\n suffix = '' if self.ind is None else '[%d]'%self.ind\n return 'Arg(%s%s)'%(self.name,suffix)\n\n def __repr__(self)->str:\n return str(self)\n\n def __eq__(self,other : object)->bool:\n return self.__dict__==other.__dict__\n\n def add(self,cxn:ConnectInfo,act:int,block:int) -> None:\n q = mkInsCmd('_arg',['action_id','block_id','name','ind'])\n sqlexecute(cxn,q,[act,block,self.name,self.ind])\n\ndef noIndex(xs:List[str])->List[Arg]:\n \"\"\"\n At some point we represented arguments as a dictionary with namespace\n elements as keys and an optional index as values. Frequently we don't want\n to index, so this function simplifies writing the values.\n \"\"\"\n return [Arg(x,None) for x in xs]\n\n\nclass PyBlock(object):\n \"\"\"\n A computational block that executes Python code\n \"\"\"\n def __init__(self\n ,func : Union[Callable,Func]\n ,name : Optional[str] = None\n ,args : Optional[List[Arg]] = None\n ) -> None:\n\n # Validate inputs\n if isinstance(func,Func):\n self.func = func\n else:\n assert callable(func), 'Not a func: '+str(func)\n self.func = Func(func)\n\n # Default options\n args = args or []\n if name is None:\n name = self.func.name\n\n # Store fields\n self.name = name\n self.args = args\n\n def __str__(self)->str:\n fn = '' if self.func.name==self.name else ',%s'%self.func.name\n return 'PyBlock<%s%s,%d args>'%(self.name,fn,len(self.args))\n\n def __repr__(self)->str:\n return str(self)\n\n def hash_(self)->str:\n return hash_((self.name,self.func.src,[a.hash_() for a in self.args]))\n\n def apply(self\n ,curr_dict : Dict[str,Any]\n ,cxn : Optional[ConnectInfo] = None\n ) -> Any:\n \"\"\"\n Take a TempFunc's function and wrap it such that it can accept a namespace\n dictionary. 
The information about how the function interacts with the\n namespace is contained in the TempFunc's args.\n \"\"\"\n assert cxn is None\n\n try:\n inputvars = [arg.arg_get(curr_dict) for arg in self.args]\n except (KeyError,TypeError,IndexError) as e:\n print(e)\n import pdb;pdb.set_trace()\n raise ValueError()\n\n try:\n return self.func.apply(*inputvars)\n except Exception as e:\n msg = '\\tApplying func %s in tempfunc %s:\\n\\t'%(self.func.name,self.name)\n raise ExternalError(msg + format_exc())\n\n def add(self,cxn:ConnectInfo,a_id:int,b_id:int)->None:\n cols = ['action_id','py_block_id','func_id','name']\n f_id = self.func.add(cxn)\n q = mkInsCmd('_py_block',cols)\n sqlexecute(cxn,q,[a_id,b_id,f_id,self.name])\n\n\n# PyBlock Shortcuts\n#-----------------\n\ndef Rename(a:str,b:str,i:Optional[int] = None)-> PyBlock:\n \"\"\"Rename <a> to <b> (optionally index the i'th element of A)\"\"\"\n return PyBlock(identity,b,[Arg(a,i)])\n\ndef Unpack(a:str,b:List[str])-> List[PyBlock]:\n \"\"\"\n Assign a list of names <b> to indexing of the tuple under the name <a>\n \"\"\"\n return [Rename(a,b[i],i) for i in range(len(b))]\n\ndef SimpleFunc(func : Union[Func,Callable],\n inputs : Optional[List[str]] = None,\n outputs : Optional[List[str]] = None\n ) -> List[PyBlock]:\n \"\"\"\n For an action that has one function in its template\n \"\"\"\n # Defaults\n inputs = inputs or []\n outputs = outputs or []\n\n src = func.src if isinstance(func,Func) else Func(func).src # type: ignore\n name = hash_(src)\n main_func = PyBlock(func,name,args=[Arg(x) for x in inputs])\n\n if main_func.func.nIn > len(inputs):\n main_func.args = noIndex(main_func.func.argnames)\n if len(outputs)==1:\n exported = [PyBlock(identity,outputs[0],[Arg(name)])]\n else:\n exported = [PyBlock(identity,x,[Arg(name,i)])\n for i,x in enumerate(outputs)]\n return [main_func] + exported\n\ndef SimplePipe(funcs : List[Callable],\n inputs : Optional[List[str]] = None,\n outputs : Optional[List[str]] = None\n ) -> List[PyBlock]:\n \"\"\"\n A series of functions where the output of each function is fed to the\n following function in the pipeline. 
You must specify the names of the\n inputs and outputs to the whole process\n \"\"\"\n inputs = inputs or []\n outputs = outputs or []\n init_func = PyBlock(funcs[0],args=[Arg(x) for x in inputs])\n remaining = [PyBlock(funcs[i],args=[Arg(funcs[i-1].__name__)]) for i in range(1,len(funcs))]\n if len(outputs)==1:\n exported = [PyBlock(identity,outputs[0],[Arg(funcs[-1].__name__)])]\n else:\n exported = [PyBlock(identity,x,[Arg(funcs[-1].__name__,i)])\n for i,x in enumerate(outputs)]\n return [init_func] + remaining + exported\n\n################################################################################\nclass FuncLike(object):\n errmsg = 'Failed with input dict %s'\n def __init__(self,x:FuncLikeArg)->None:\n\n if isinstance(x,type(lambda:None)):\n self.blocks = SimpleFunc(Func(x))\n self.decor = True # we should try to handle namespace automatically\n elif isinstance(x,PyBlock):\n self.blocks = [x]\n self.decor = True\n elif isinstance(x,list):\n if isinstance(x[0],type(lambda:None)):\n self.decor = True\n self.blocks = SimplePipe(x) # type: ignore\n else:\n assert all([isinstance(y,PyBlock) for y in x])\n self.blocks = self.order_blocks(x) # type: ignore\n self.decor = False\n elif x is None:\n self.dtype = 'none'\n self.blocks = []\n self.decor = True\n else:\n raise TypeError('Expected something FuncLike, but got %s'%type(x))\n\n def decorate(self,query_names:List[str],ins_names:Optional[List[str]])->None:\n #print('\\tquery_names %s ins_names %s'%(query_names,ins_names))\n if self.decor:\n if len(self.blocks)==0:\n # Special scenario: no PyBlocks .... we map Query outputs directly\n assert ins_names, 'Query names = %s but no insnames'%(query_names)\n if set(ins_names).issubset(query_names): pass # all good\n elif len(query_names)==len(ins_names):\n renameblocks = [Rename(a,b) for a,b in zip(query_names,ins_names)]\n self.blocks.extend(renameblocks)\n\n else:\n first,last = self.blocks[0],self.blocks[-1]\n if (sorted([a.name for a in first.args]) != sorted(query_names)) \\\n and (len(query_names) >= len(first.args)):\n first.args = noIndex(query_names)\n if ins_names:\n self.blocks.extend(Unpack(last.name,ins_names))\n\n def apply(self,dic:dict,const_dic:dict={})->dict:\n d = merge_dicts([dic,const_dic])\n try:\n for b in self.blocks:\n d[b.name] = b.apply(d) #type: ignore\n return d\n except ExternalError as e:\n err = '%s\\n\\n%s'%(self.errmsg%pformat(d),str(e).replace('\\\\n','\\n'))\n raise ExternalError(err)\n def hash_(self)->str:\n return ','.join([b.hash_() for b in self.blocks])\n\n def namespace(self)->List[str]:\n return [b.name for b in self.blocks]\n\n @staticmethod\n def order_blocks(bs:List[PyBlock])->List[PyBlock]:\n \"\"\"\n Prior to storing the list of blocks as a field of the Plan,\n order them such that any block that calls some other block occurs afterwards\n\n \"\"\"\n G = DiGraph() # each template has a DAG, with tempfuncs as nodes\n G.add_nodes_from([b.name for b in bs])\n\n b_dict = {b.name : b for b in bs}\n\n for b in bs:\n for ba in b.args:\n if ba.name in b_dict.keys():\n G.add_edge(ba.name,b.name)\n return topsort_with_dict(G,b_dict)\n", "sub_path": "dbgen/support/funclike.py", "file_name": "funclike.py", "file_ext": "py", "file_size_in_byte": 10135, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "typing.Optional", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 16, 
"usage_type": "name"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 24, "usage_type": "name"}, {"api_name": "dbgen.core.misc.hash_", "line_number": 29, "usage_type": "call"}, {"api_name": "dbgen.core.misc.abbreviate", "line_number": 45, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 46, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 35, "usage_type": "name"}, {"api_name": "dbgen.support.misc.ConnectInfo", "line_number": 65, "usage_type": "name"}, {"api_name": "dbgen.core.sql.mkInsCmd", "line_number": 66, "usage_type": "call"}, {"api_name": "dbgen.core.sql.sqlexecute", "line_number": 67, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 69, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 83, "usage_type": "name"}, {"api_name": "dbgen.support.func.Func", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 85, "usage_type": "name"}, {"api_name": "dbgen.support.func.Func", "line_number": 89, "usage_type": "argument"}, {"api_name": "dbgen.support.func.Func", "line_number": 93, "usage_type": "call"}, {"api_name": "dbgen.core.misc.hash_", "line_number": 112, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 115, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 115, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 116, "usage_type": "name"}, {"api_name": "dbgen.support.misc.ConnectInfo", "line_number": 116, "usage_type": "name"}, {"api_name": "pdb.set_trace", "line_number": 129, "usage_type": "call"}, {"api_name": "dbgen.support.misc.ExternalError", "line_number": 136, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 136, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 117, "usage_type": "name"}, {"api_name": "dbgen.support.misc.ConnectInfo", "line_number": 138, "usage_type": "name"}, {"api_name": "dbgen.core.sql.mkInsCmd", "line_number": 141, "usage_type": "call"}, {"api_name": "dbgen.core.sql.sqlexecute", "line_number": 142, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 148, "usage_type": "name"}, {"api_name": "dbgen.core.misc.identity", "line_number": 150, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 152, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 158, "usage_type": "name"}, {"api_name": "dbgen.support.func.Func", "line_number": 158, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 158, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 159, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 159, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 160, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 160, "usage_type": "name"}, {"api_name": "dbgen.support.func.Func", "line_number": 169, "usage_type": "argument"}, {"api_name": "dbgen.core.misc.hash_", "line_number": 170, "usage_type": "call"}, {"api_name": "dbgen.core.misc.identity", "line_number": 176, "usage_type": "argument"}, {"api_name": "dbgen.core.misc.identity", "line_number": 178, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 161, "usage_type": "name"}, 
{"api_name": "typing.List", "line_number": 182, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 182, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 183, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 183, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 184, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 184, "usage_type": "name"}, {"api_name": "dbgen.core.misc.identity", "line_number": 196, "usage_type": "argument"}, {"api_name": "dbgen.core.misc.identity", "line_number": 198, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 185, "usage_type": "name"}, {"api_name": "dbgen.support.func.Func", "line_number": 208, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 228, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 228, "usage_type": "name"}, {"api_name": "dbgen.core.lists.merge_dicts", "line_number": 248, "usage_type": "call"}, {"api_name": "dbgen.support.misc.ExternalError", "line_number": 253, "usage_type": "name"}, {"api_name": "pprint.pformat", "line_number": 254, "usage_type": "call"}, {"api_name": "dbgen.support.misc.ExternalError", "line_number": 255, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 259, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 263, "usage_type": "name"}, {"api_name": "dbgen.core.graphs.DiGraph", "line_number": 269, "usage_type": "call"}, {"api_name": "dbgen.core.graphs.topsort_with_dict", "line_number": 278, "usage_type": "call"}]} +{"seq_id": "122122214", "text": "import time\nimport subprocess\nimport os\nimport logging\nimport json\nimport uuid\nimport datetime\nimport psutil\nfrom profiling import profile\nfrom roundwared import settings\nfrom roundwared import db\nfrom roundwared import convertaudio\nfrom roundwared import discover_audiolength\nfrom roundwared import roundexception\nfrom roundwared import icecast2\nfrom roundwared import gpsmixer\nfrom roundwared import rounddbus\nfrom roundware.rw import models\nfrom roundware import settings as rw_settings\n\ndef check_for_single_audiotrack(session_id):\n ret = False\n session = models.Session.objects.select_related('project').get(id=session_id)\n tracks = models.Audiotrack.objects.filter(project=session.project)\n if tracks.count() == 1:\n ret = True\n return ret\n\n#2.1 Protocol additions AS 1.2\n\n\ndef get_current_streaming_asset(request):\n form = request.GET\n if not form.has_key('session_id'):\n raise roundexception.RoundException(\"a session_id is required for this operation\")\n if check_for_single_audiotrack(form.get('session_id')) != True:\n raise roundexception.RoundException(\"this operation is only valid for projects with 1 audiotrack\")\n else:\n l = db.get_current_streaming_asset(form.get('session_id'))\n if l:\n return {\"asset_id\": l.asset.id,\n \"start_time\": l.starttime.isoformat(),\n \"duration_in_stream\": l.duration / 1000000,\n \"current_server_time\": datetime.datetime.now().isoformat()}\n else:\n return {\"user_error_message\": \"no asset found\"}\n\n\ndef get_asset_info(request):\n form = request.GET\n if not form.has_key('asset_id'):\n raise roundexception.RoundException(\"an asset_id is required for this operation\")\n\n if check_for_single_audiotrack(form.get('session_id')) != True:\n raise roundexception.RoundException(\"this operation is only valid for projects with 1 audiotrack\")\n else:\n a = db.get_asset(form.get('asset_id'))\n if a:\n return {\"asset_id\": a.id,\n 
\"created\": a.created.isoformat(),\n \"duraton_in_ms\": a.audiolength / 1000000}\n else:\n return {\"user_error_message\": \"asset not found\"}\n\ndef play_asset_in_stream(request):\n form = request.GET\n # add skipped asset_id to form in order to track which asset is played\n #assetid = form[asset_id]\n #form[data] = form[\"asset_id\"]\n request = form_to_request(form)\n arg_hack = json.dumps(request)\n db.log_event(\"play_asset_in_stream\", int(form['session_id']), form)\n\n if not form.has_key('session_id'):\n raise roundexception.RoundException(\"a session_id is required for this operation\")\n if check_for_single_audiotrack(form.get('session_id')) != True:\n raise roundexception.RoundException(\"this operation is only valid for projects with 1 audiotrack\")\n if not form.has_key('asset_id'):\n raise roundexception.RoundException(\"an asset_id is required for this operation\")\n rounddbus.emit_stream_signal(int(form['session_id']), \"play_asset\", arg_hack)\n return {\"success\": True}\n\n\ndef skip_ahead(request):\n form = request.GET\n db.log_event(\"skip_ahead\", int(form['session_id']), form)\n\n if not form.has_key('session_id'):\n raise roundexception.RoundException(\"a session_id is required for this operation\")\n if check_for_single_audiotrack(form.get('session_id')) != True:\n raise roundexception.RoundException(\"this operation is only valid for projects with 1 audiotrack\")\n rounddbus.emit_stream_signal(int(form['session_id']), \"skip_ahead\", \"\")\n return {\"success\": True}\n\ndef vote_asset(request):\n form = request.GET\n db.log_event(\"vote_asset\", int(form['session_id']), form)\n\n if not form.has_key('session_id'):\n raise roundexception.RoundException(\"a session_id is required for this operation\")\n if not form.has_key('asset_id'):\n raise roundexception.RoundException(\"an asset_id is required for this operation\")\n if not form.has_key('vote_type'):\n raise roundexception.RoundException(\"a vote_type is required for this operation\")\n if check_for_single_audiotrack(form.get('session_id')) != True:\n raise roundexception.RoundException(\"this operation is only valid for projects with 1 audiotrack\")\n\n try:\n session = models.Session.objects.get(id=int(form.get('session_id')))\n except:\n raise roundexception.RoundException(\"Session not found.\")\n try:\n asset = models.Asset.objects.get(id=int(form.get('asset_id')))\n except:\n raise roundexception.RoundException(\"asset not found.\")\n if not form.has_key('value'):\n v = models.Vote(asset=asset, session=session, type=form.get('vote_type'))\n else:\n v = models.Vote(asset=asset, session=session, value=int(form.get('value')), type=form.get('vote_type'))\n v.save()\n return {\"success\": True}\n\n#2.0 Protocol\n\n\n# @profile(stats=True)\ndef get_config(request):\n form = request.GET\n try:\n hostname_without_port = str(settings.config[\"external_host_name_without_port\"])\n except KeyError:\n raise roundexception.RoundException(\"Roundware configuration file is missing 'external_host_name_without_port' key. 
\")\n #check params\n if not form.has_key('project_id'):\n raise roundexception.RoundException(\"a project_id is required for this operation\")\n project = models.Project.objects.get(id=form.get('project_id'))\n speakers = models.Speaker.objects.filter(project=form.get('project_id')).values()\n audiotracks = models.Audiotrack.objects.filter(project=form.get('project_id')).values()\n\n if not form.has_key('device_id') or (form.has_key('device_id') and form['device_id'] == \"\"):\n device_id = str(uuid.uuid4())\n else:\n device_id = form.get('device_id')\n\n l = models.Language.objects.filter(language_code='en')[0]\n if form.has_key('language') or (form.has_key('language') and form['language'] == \"\"):\n try:\n l = models.Language.objects.filter(language_code=form.get('language'))[0]\n except:\n pass\n\n s = models.Session(device_id=device_id, starttime=datetime.datetime.now(), project=project, language=l)\n if form.has_key('client_type'):\n s.client_type = form.get('client_type')\n if form.has_key('client_system'):\n s.client_system = form.get('client_system')\n\n\n sharing_url = str.format(\"http://{0}/roundware/?operation=view_envelope&envelopeid=[id]\", hostname_without_port)\n sharing_message = \"none set\"\n out_of_range_message = \"none set\"\n legal_agreement = \"none set\"\n demo_stream_message = \"none set\"\n try:\n sharing_message = project.sharing_message_loc.filter(language=l)[0].localized_string\n except:\n pass\n try:\n out_of_range_message = project.out_of_range_message_loc.filter(language=l)[0].localized_string\n except:\n pass\n try:\n legal_agreement = project.legal_agreement_loc.filter(language=l)[0].localized_string\n except:\n pass\n try:\n demo_stream_message = project.demo_stream_message_loc.filter(language=l)[0].localized_string\n except:\n pass\n\n cpu_idle = psutil.cpu_times_percent().idle\n s.demo_stream_enabled = project.demo_stream_enabled or cpu_idle < float(settings.config[\"demo_stream_cpu_limit\"])\n\n s.save()\n session_id = s.id\n\n response = [\n {\"device\":{\"device_id\": device_id}},\n {\"session\":{\"session_id\": session_id}},\n {\"project\":{\n \"project_id\":project.id,\n \"project_name\":project.name,\n \"audio_format\":project.audio_format,\n \"max_recording_length\":project.max_recording_length,\n \"recording_radius\":project.recording_radius,\n \"sharing_message\":sharing_message,\n \"out_of_range_message\":out_of_range_message,\n \"sharing_url\":project.sharing_url,\n \"listen_questions_dynamic\":project.listen_questions_dynamic,\n \"speak_questions_dynamic\":project.speak_questions_dynamic,\n \"listen_enabled\":project.listen_enabled,\n \"geo_listen_enabled\":project.geo_listen_enabled,\n \"speak_enabled\":project.speak_enabled,\n \"geo_speak_enabled\":project.geo_speak_enabled,\n \"reset_tag_defaults_on_startup\":project.reset_tag_defaults_on_startup,\n \"legal_agreement\":legal_agreement,\n \"files_url\":project.files_url,\n \"files_version\":project.files_version,\n \"audio_stream_bitrate\":project.audio_stream_bitrate,\n # TODO: following attribute 'demo_stream_enabled' has be moved to the 'session' object\n \"demo_stream_enabled\":s.demo_stream_enabled,\n \"demo_stream_url\":project.demo_stream_url,\n \"demo_stream_message\":demo_stream_message,\n }},\n\n {\"server\":{\n \"version\": \"2.0\"}},\n {\"speakers\":[dict(d) for d in speakers]},\n {\"audiotracks\":[dict(d) for d in audiotracks]}\n ]\n db.log_event('start_session', session_id, None)\n return response\n\n\n# @profile(stats=True)\ndef get_tags_for_project(request):\n form = 
request.GET\n p = None\n s = None\n if not form.has_key('project_id') and not form.has_key('session_id'):\n raise roundexception.RoundException(\"either a project_id or session_id is required for this operation\")\n\n if form.has_key('project_id'):\n p = models.Project.objects.get(id=form.get('project_id'))\n if form.has_key('session_id'):\n s = models.Session.objects.get(id=form.get('session_id'))\n return db.get_config_tag_json(p, s)\n\n\n#get_available_assets\n#args (project_id, [latitude], [longitude], [radius], [tagids,], [tagbool], [language], [asset_id,...], [envelope_id,...], [...])\n#can pass additional parameters matching name of fields on Asset\n#example: http://localhost/roundware/?operation=get_available_assets\n#returns Dictionary\n# @profile(stats=True)\ndef get_available_assets(request):\n \"\"\"Return JSON serializable dictionary with the number of matching assets\n by media type and a list of available assets based on filter criteria passed in\n request. If asset_id is passed, ignore other filters and return single\n asset. If multiple, comma-separated values for asset_id are passed, ignore\n other filters and return all those assets. If envelope_id is passed, ignore\n other filters and return all assets in that envelope. If multiple, \n comma-separated values for envelope_id are passed, ignore\n other filters and return all those assets. Returns localized\n value for tag strings on asset by asset basis unless a specific language\n code is passed. Fall back to English if necessary.\"\"\"\n \n\n def _get_best_localized_string(asset, tag, best_lang_id):\n \"\"\" Return localized string with specified language code.\n If that's not available, look for a language field on the model and\n use that. If that's not available, fall back to English. \n \"\"\"\n try:\n localization = tag.loc_msg.get(language=best_lang_id)\n except models.LocalizedString.DoesNotExist:\n # try object's specified language\n asset_lang = asset.language\n if asset_lang and retlng != asset_lang:\n localization = tag.loc_msg.get(language=asset_lang)\n else:\n # fall back to English\n eng_id = models.Language.objects.get(language_code='en')\n localization = tag.loc_msg.get(language=eng_id)\n return localization.localized_string\n\n form = request.GET\n kw = {}\n try:\n hostname_without_port = str(settings.config[\"external_host_name_without_port\"])\n except KeyError:\n raise roundexception.RoundException(\"Roundware configuration file is missing 'external_host_name_without_port' key. 
\")\n\n known_params = ['project_id', 'latitude', 'longitude',\n 'tag_ids', 'tagbool', 'radius', 'language', 'asset_id',\n 'envelope_id' ]\n project_id = get_parameter_from_request(request, 'project_id', None)\n asset_id = get_parameter_from_request(request, 'asset_id', None)\n envelope_id = get_parameter_from_request(request, 'envelope_id', None)\n latitude = get_parameter_from_request(request, 'latitude', None)\n longitude = get_parameter_from_request(request, 'longitude', None)\n radius = get_parameter_from_request(request, 'radius', None)\n tag_ids = get_parameter_from_request(request, 'tagids', None)\n tagbool = get_parameter_from_request(request, 'tagbool', None)\n language = get_parameter_from_request(request, 'language', None)\n if (latitude and not longitude) or (longitude and not latitude):\n raise roundexception.RoundException(\n \"This operation requires that you pass both latitude and \"\n \"longitude, if you pass either one.\")\n\n # accept other keyword parameters as long as the keys are fields on\n # Asset model\n asset_fields = models.get_field_names_from_model(models.Asset)\n asset_media_types = [tup[0] for tup in models.Asset.ASSET_MEDIA_TYPES]\n extraparams = [(param[0], param[1]) for param in form.items()\n if param[0] not in known_params and\n param[0] in asset_fields]\n extras = {}\n for k, v in extraparams:\n extras[str(k)] = str(v)\n\n # if a language (code) is specified, use that\n # otherwise, return localized value specific to Asset \n qry_retlng = None\n if language:\n try:\n qry_retlng = models.Language.objects.get(language_code=language)\n except models.Language.DoesNotExist:\n raise roundexception.RoundException(\n \"Specified language code does not exist.\"\n )\n\n if project_id or asset_id or envelope_id:\n \n # by asset\n if asset_id:\n # ignore all other filter criteria\n assets = models.Asset.objects.filter(id__in=asset_id.split(','))\n \n # by envelope \n elif envelope_id:\n assets = []\n envelopes = models.Envelope.objects.filter(id__in=envelope_id.split(','))\n for e in envelopes:\n e_assets = e.assets.all()\n for a in e_assets:\n if a not in assets:\n assets.append(a)\n \n # by project\n elif project_id:\n project = models.Project.objects.get(id=project_id)\n kw['project__exact'] = project\n \n assets = models.Asset.objects.filter(**kw)\n if tag_ids:\n if tagbool and str(tagbool).lower() == 'or':\n assets = assets.filter(tags__in=tag_ids.split(',')).distinct()\n else:\n # 'and'. Asset must have all tags\n for tag_id in tag_ids.split(','):\n assets = assets.filter(tags__id=tag_id)\n\n # filter by extra params. 
These are chained with an AND\n assets = assets.filter(**extras)\n\n if latitude and longitude: # need both\n # return only assets within specified or default radius\n # by project\n latitude = float(latitude)\n longitude = float(longitude)\n if not radius:\n radius = project.recording_radius\n if not radius:\n raise roundexception.RoundException(\"Project does not \"\n \"specify a radius and no radius parameter passed to \"\n \"operation.\")\n radius = float(radius)\n for asset in assets:\n distance = gpsmixer.distance_in_meters(\n latitude, longitude,\n asset.latitude, asset.longitude)\n if distance > radius:\n assets = assets.exclude(id=asset.id)\n\n assets_info = {}\n assets_info['number_of_assets'] = {}\n for mtype in asset_media_types:\n assets_info['number_of_assets'][mtype]= 0\n assets_list = []\n\n for asset in assets:\n if asset.mediatype in asset_media_types:\n assets_info['number_of_assets'][asset.mediatype] +=1\n if not qry_retlng:\n retlng = asset.language # can be None\n else:\n retlng = qry_retlng\n assets_list.append(\n dict(asset_id=asset.id,\n asset_url='%s%s' % (\n rw_settings.AUDIO_FILE_URI, asset.filename),\n latitude=asset.latitude,\n longitude=asset.longitude,\n audio_length=asset.audiolength,\n submitted=asset.submitted,\n project=asset.project.name,\n language=asset.language.language_code,\n tags=[dict(\n tag_category_name=tag.tag_category.name,\n tag_id=tag.id,\n localized_value=_get_best_localized_string(\n asset, tag, retlng)\n ) for tag in asset.tags.all()]),\n )\n assets_info['assets'] = assets_list\n return assets_info\n\n else:\n raise roundexception.RoundException(\"This operation requires that you \"\n \"pass a project_id, asset_id, or envelope_id\") \n\ndef log_event(request):\n\n form = request.GET\n if not form.has_key('session_id'):\n raise roundexception.RoundException(\"a session_id is required for this operation\")\n if not form.has_key('event_type'):\n raise roundexception.RoundException(\"an event_type is required for this operation\")\n db.log_event(form.get('event_type'), form.get('session_id'), form)\n\n return {\"success\": True}\n\n#create_envelope\n#args: (operation, session_id, [tags])\n#example: http://localhost/roundware/?operation=create_envelope&sessionid=1&tags=1,2\n#returns envelope_id, sharing_messsage\n#example:\n#{\"envelope_id\": 2}\n\n# @profile(stats=True)\ndef create_envelope(request):\n form = request.GET\n if not form.has_key('session_id'):\n raise roundexception.RoundException(\"a session_id is required for this operation\")\n s = models.Session.objects.get(id=form.get('session_id'))\n\n #todo - tags\n\n env = models.Envelope(session=s)\n env.save()\n\n return {\"envelope_id\": env.id}\n\n#add_asset_to_envelope (POST method)\n#args (operation, envelope_id, file, latitude, longitude, [tagids])\n#example: http://localhost/roundware/?operation=add_asset_to_envelope\n# OR\n#add_asset_to_envelope (GET method)\n#args (operation, envelope_id, asset_id) #asset_id must point to an Asset that exists in the database\n#returns success bool\n#{\"success\": true}\n# @profile(stats=True)\ndef add_asset_to_envelope(request):\n\n #get asset_id from the GET request\n asset_id = get_parameter_from_request(request, 'asset_id', False)\n asset = None\n #grab the Asset from the database, if an asset_id has been passed in\n if asset_id:\n try:\n asset = models.Asset.objects.get(pk=asset_id)\n except models.Asset.DoesNotExist:\n raise roundexception.RoundException(\"Invalid Asset ID Provided. 
No Asset exists with ID %s\" % asset_id)\n envelope_id = get_parameter_from_request(request, 'envelope_id', True)\n\n envelope = models.Envelope.objects.select_related('session').get(id=envelope_id)\n session = envelope.session\n\n db.log_event(\"start_upload\", session.id, request.GET)\n\n fileitem = request.FILES.get('file') if not asset else asset.file\n #get mediatype from the GET request\n mediatype = get_parameter_from_request(request, 'mediatype', False) if not asset else asset.mediatype\n #if mediatype parameter not passed, set to 'audio'\n #this ensures backwards compatibility\n if mediatype is None:\n mediatype = \"audio\"\n\n if fileitem.name:\n #copy the file to a unique name (current time and date)\n logging.debug(\"Processing \" + fileitem.name)\n (filename_prefix, filename_extension) = \\\n os.path.splitext(fileitem.name)\n fn = time.strftime(\"%Y%m%d-%H%M%S\") + filename_extension\n fileout = open(os.path.join(settings.config[\"upload_dir\"], fn), 'wb')\n fileout.write(fileitem.file.read())\n fileout.close()\n #delete the uploaded original after the copy has been made\n if asset:\n asset.file.delete()\n # re-assign file to asset\n asset.file.name = fn\n asset.filename = fn\n asset.save()\n #make sure everything is in wav form only if mediatype is audio\n if mediatype == \"audio\":\n newfilename = convertaudio.convert_uploaded_file(fn)\n else:\n newfilename = fn\n if newfilename:\n #create the new asset if request comes in from a source other\n #than the django admin interface\n if not asset:\n #get location data from request\n latitude = get_parameter_from_request(request, 'latitude', False)\n longitude = get_parameter_from_request(request, 'longitude', False)\n #if no location data in request, default to project latitude and longitude\n if not latitude:\n latitude = session.project.latitude\n if not longitude:\n longitude = session.project.longitude\n tagset = []\n tags = get_parameter_from_request(request, 'tags', False)\n if tags is not None:\n ids = tags.split(',')\n tagset = models.Tag.objects.filter(id__in=ids)\n\n # get optional submitted parameter from request (Y, N or blank string are only acceptable values)\n submitted = get_parameter_from_request(request, 'submitted', False)\n # set submitted variable to proper boolean value if it is passed as parameter\n if submitted == \"N\":\n submitted = False\n elif submitted == \"Y\":\n submitted = True\n # if blank string or not included as parameter, check if in range of project and if so\n # set asset.submitted based on project.auto_submit boolean value\n elif submitted is None or len(submitted) == 0:\n submitted = False\n if is_listener_in_range_of_stream(request.GET, session.project):\n submitted = session.project.auto_submit\n\n asset = models.Asset(latitude=latitude,\n longitude=longitude,\n filename=newfilename,\n session=session,\n submitted=submitted,\n mediatype=mediatype,\n volume=1.0,\n language=session.language,\n project = session.project)\n asset.file.name = fn\n asset.save()\n for t in tagset:\n asset.tags.add(t)\n #if the request comes from the django admin interface\n #update the Asset with the right information\n else:\n #update asset with session\n asset.session = session\n asset.filename = newfilename\n\n #get the audiolength of the file only if mediatype is audio and update the Asset\n if mediatype == \"audio\":\n discover_audiolength.discover_and_set_audiolength(asset, newfilename)\n asset.save()\n envelope.assets.add(asset)\n envelope.save()\n else:\n raise roundexception.RoundException(\"File not 
converted successfully: \" + newfilename)\n else:\n raise roundexception.RoundException(\"No file in request\")\n rounddbus.emit_stream_signal(0, \"refresh_recordings\", \"\")\n return {\"success\": True,\n \"asset_id\": asset.id}\n\n\ndef get_parameter_from_request(request, name, required):\n ret = None\n if request.POST.has_key(name):\n ret = request.POST.get(name)\n elif request.GET.has_key(name):\n ret = request.GET.get(name)\n else:\n if required:\n raise roundexception.RoundException(name + \" is required for this operation\")\n return ret\n\n\n# @profile(stats=True)\ndef request_stream(request):\n request_form = request.GET\n try:\n hostname_without_port = str(settings.config[\"external_host_name_without_port\"])\n except KeyError:\n raise roundexception.RoundException(\"Roundware configuration file is missing 'external_host_name_without_port' key. \")\n db.log_event(\"request_stream\", int(request_form['session_id']), request_form)\n\n if not request_form.get('session_id'):\n raise roundexception.RoundException(\"Must supply session_id.\")\n session = models.Session.objects.select_related('project').get(id=request_form.get('session_id'))\n project = session.project\n\n if session.demo_stream_enabled:\n msg = \"demo_stream_message\"\n try:\n msg = project.demo_stream_message_loc.filter(language=session.language)[0].localized_string\n except:\n pass\n\n if project.demo_stream_url:\n url = project.demo_stream_url\n else:\n url = \"http://\" + hostname_without_port + \":\" + \\\n str(settings.config[\"icecast_port\"]) + \\\n \"/demo_stream.mp3\"\n\n return {\n 'stream_url': url,\n 'demo_stream_message': msg\n }\n\n elif is_listener_in_range_of_stream(request_form, project):\n command = ['/usr/local/bin/streamscript', '--session_id', str(session.id), '--project_id', str(project.id)]\n for p in ['latitude', 'longitude', 'audio_format']:\n if request_form.has_key(p) and request_form[p]:\n command.extend(['--' + p, request_form[p].replace(\"\\t\", \",\")])\n if request_form.has_key('config'):\n command.extend(['--configfile', os.path.join(settings.configdir, request_form['config'])])\n else:\n command.extend(['--configfile', os.path.join(settings.configdir, 'rw')])\n if request_form.has_key('audio_stream_bitrate'):\n command.extend(['--audio_stream_bitrate', str(request_form['audio_stream_bitrate'])])\n\n audio_format = project.audio_format.upper()\n\n apache_safe_daemon_subprocess(command)\n wait_for_stream(session.id, audio_format)\n\n return {\n \"stream_url\": \"http://\" + hostname_without_port + \":\" + \\\n str(settings.config[\"icecast_port\"]) + \\\n icecast_mount_point(session.id, audio_format),\n }\n else:\n msg = \"This application is designed to be used in specific geographic locations. Apparently your phone thinks you are not at one of those locations, so you will hear a sample audio stream instead of the real deal. If you think your phone is incorrect, please restart Scapes and it will probably work. 
Thanks for checking it out!\"\n try:\n msg = project.out_of_range_message_loc.filter(language=session.language)[0].localized_string\n except:\n pass\n\n if project.out_of_range_url:\n url = project.out_of_range_url\n else:\n url = \"http://\" + hostname_without_port + \":\" + \\\n str(settings.config[\"icecast_port\"]) + \\\n \"/outofrange.mp3\"\n\n return {\n 'stream_url': url,\n 'user_message': msg\n }\n\n# @profile(stats=True)\ndef modify_stream(request):\n success = False\n msg = \"\"\n form = request.GET\n request = form_to_request(form)\n arg_hack = json.dumps(request)\n db.log_event(\"modify_stream\", int(form['session_id']), form)\n\n if form.has_key('session_id'):\n session = models.Session.objects.select_related('project').get(id=form['session_id'])\n project = session.project\n if form.has_key('language'):\n try:\n logging.debug(\"modify_stream: language: \" + form['language'])\n l = models.Language.objects.filter(language_code=form['language'])[0]\n session.language = l\n session.save()\n except:\n raise roundexception.RoundException(\"language not supported\")\n\n audio_format = project.audio_format.upper()\n if stream_exists(int(form['session_id']), audio_format):\n rounddbus.emit_stream_signal(int(form['session_id']), \"modify_stream\", arg_hack)\n success = True\n else:\n msg = \"no stream available for session: \" + form['session_id']\n else:\n msg = \"a session_id is required for this operation\"\n\n if success:\n return {\"success\": success}\n else:\n return {\"success\": success, }\n\n\ndef move_listener(request):\n form = request.GET\n request = form_to_request(form)\n arg_hack = json.dumps(request)\n rounddbus.emit_stream_signal(int(form['session_id']), \"move_listener\", arg_hack)\n return {\"success\": True}\n\n\ndef heartbeat(request):\n form = request.GET\n rounddbus.emit_stream_signal(int(form['session_id']), \"heartbeat\", \"\")\n db.log_event(\"heartbeat\", int(form['session_id']), form)\n return {\"success\": True}\n\n\ndef current_version(request):\n return {\"version\": \"2.0\"}\n\n#END 2.0 Protocol\n\n#2.0 Helper methods\n\n\ndef apache_safe_daemon_subprocess(command):\n logging.debug(str(command))\n DEVNULL_OUT = open(os.devnull, 'w')\n DEVNULL_IN = open(os.devnull, 'r')\n proc = subprocess.Popen(\n command,\n close_fds=True,\n stdin=DEVNULL_IN,\n stdout=DEVNULL_OUT,\n stderr=DEVNULL_OUT,\n )\n proc.wait()\n\n\n# Loops until the given stream is present and ready to be listened to.\ndef wait_for_stream(sessionid, audio_format):\n logging.debug(\"waiting \" + str(sessionid) + audio_format)\n admin = icecast2.Admin(settings.config[\"icecast_host\"] + \":\" + str(settings.config[\"icecast_port\"]),\n settings.config[\"icecast_username\"],\n settings.config[\"icecast_password\"])\n retries_left = 1000\n while not admin.stream_exists(icecast_mount_point(sessionid, audio_format)):\n if retries_left > 0:\n retries_left -= 1\n else:\n raise roundexception.RoundException(\"Stream timed out on creation\")\n time.sleep(0.1)\n\n\ndef stream_exists(sessionid, audio_format):\n logging.debug(\"checking for existence of \" + str(sessionid) + audio_format)\n admin = icecast2.Admin(settings.config[\"icecast_host\"] + \":\" + str(settings.config[\"icecast_port\"]),\n settings.config[\"icecast_username\"],\n settings.config[\"icecast_password\"])\n return admin.stream_exists(icecast_mount_point(sessionid, audio_format))\n\n\ndef is_listener_in_range_of_stream(form, proj):\n if not ('latitude' in form and 'longitude' in form) or not (form['latitude'] and form['longitude']):\n 
return True\n speakers = models.Speaker.objects.filter(project=proj, activeyn=True)\n\n for speaker in speakers:\n #only do this if latitude and longitude are included, return False otherwise\n distance = gpsmixer.distance_in_meters(\n float(form['latitude']),\n float(form['longitude']),\n speaker.latitude,\n speaker.longitude)\n if distance < 3 * speaker.maxdistance:\n return True\n return False\n\n#END 2.0 Helper methods\n\n\ndef form_to_request(form):\n request = {}\n for p in ['project_id', 'session_id', 'asset_id']:\n if form.has_key(p) and form[p]:\n request[p] = map(int, form[p].split(\"\\t\"))\n else:\n request[p] = []\n for p in ['tags']:\n if form.has_key(p) and form[p]:\n # make sure we don't have blank values from trailing commas\n p_list = [v for v in form[p].split(\",\") if v != \"\"]\n request[p] = map(int, p_list)\n else:\n request[p] = []\n\n for p in ['latitude', 'longitude']:\n if form.has_key(p) and form[p]:\n request[p] = float(form[p])\n else:\n request[p] = False\n return request\n\n\ndef icecast_mount_point(sessionid, audio_format):\n return '/stream' + str(sessionid) + \".\" + audio_format.lower()\n", "sub_path": "roundwared/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 32799, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "roundware.rw.models.Session.objects.select_related", "line_number": 23, "usage_type": "call"}, {"api_name": "roundware.rw.models.Session", "line_number": 23, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 23, "usage_type": "name"}, {"api_name": "roundware.rw.models.Audiotrack.objects.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "roundware.rw.models.Audiotrack", "line_number": 24, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 24, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 35, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 35, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 37, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 37, "usage_type": "name"}, {"api_name": "roundwared.db.get_current_streaming_asset", "line_number": 39, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "attribute"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 52, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 52, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 55, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 55, "usage_type": "name"}, {"api_name": "roundwared.db.get_asset", "line_number": 57, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 57, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 71, "usage_type": "call"}, {"api_name": "roundwared.db.log_event", "line_number": 72, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 72, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 75, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 75, "usage_type": "name"}, {"api_name": 
"roundwared.roundexception.RoundException", "line_number": 77, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 77, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 79, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 79, "usage_type": "name"}, {"api_name": "roundwared.rounddbus.emit_stream_signal", "line_number": 80, "usage_type": "call"}, {"api_name": "roundwared.rounddbus", "line_number": 80, "usage_type": "name"}, {"api_name": "roundwared.db.log_event", "line_number": 86, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 86, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 89, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 89, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 91, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 91, "usage_type": "name"}, {"api_name": "roundwared.rounddbus.emit_stream_signal", "line_number": 92, "usage_type": "call"}, {"api_name": "roundwared.rounddbus", "line_number": 92, "usage_type": "name"}, {"api_name": "roundwared.db.log_event", "line_number": 97, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 97, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 100, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 100, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 102, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 102, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 104, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 104, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 106, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 106, "usage_type": "name"}, {"api_name": "roundware.rw.models.Session.objects.get", "line_number": 109, "usage_type": "call"}, {"api_name": "roundware.rw.models.Session", "line_number": 109, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 109, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 111, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 111, "usage_type": "name"}, {"api_name": "roundware.rw.models.Asset.objects.get", "line_number": 113, "usage_type": "call"}, {"api_name": "roundware.rw.models.Asset", "line_number": 113, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 113, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 115, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 115, "usage_type": "name"}, {"api_name": "roundware.rw.models.Vote", "line_number": 117, "usage_type": "call"}, {"api_name": "roundware.rw.models", "line_number": 117, "usage_type": "name"}, {"api_name": "roundware.rw.models.Vote", "line_number": 119, "usage_type": "call"}, {"api_name": "roundware.rw.models", "line_number": 119, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 130, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 130, "usage_type": "name"}, {"api_name": 
"roundwared.roundexception.RoundException", "line_number": 132, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 132, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 135, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 135, "usage_type": "name"}, {"api_name": "roundware.rw.models.Project.objects.get", "line_number": 136, "usage_type": "call"}, {"api_name": "roundware.rw.models.Project", "line_number": 136, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 136, "usage_type": "name"}, {"api_name": "roundware.rw.models.Speaker.objects.filter", "line_number": 137, "usage_type": "call"}, {"api_name": "roundware.rw.models.Speaker", "line_number": 137, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 137, "usage_type": "name"}, {"api_name": "roundware.rw.models.Audiotrack.objects.filter", "line_number": 138, "usage_type": "call"}, {"api_name": "roundware.rw.models.Audiotrack", "line_number": 138, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 138, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 141, "usage_type": "call"}, {"api_name": "roundware.rw.models.Language.objects.filter", "line_number": 145, "usage_type": "call"}, {"api_name": "roundware.rw.models.Language", "line_number": 145, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 145, "usage_type": "name"}, {"api_name": "roundware.rw.models.Language.objects.filter", "line_number": 148, "usage_type": "call"}, {"api_name": "roundware.rw.models.Language", "line_number": 148, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 148, "usage_type": "name"}, {"api_name": "roundware.rw.models.Session", "line_number": 152, "usage_type": "call"}, {"api_name": "roundware.rw.models", "line_number": 152, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 152, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 152, "usage_type": "attribute"}, {"api_name": "psutil.cpu_times_percent", "line_number": 181, "usage_type": "call"}, {"api_name": "roundwared.settings.config", "line_number": 182, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 182, "usage_type": "name"}, {"api_name": "roundwared.db.log_event", "line_number": 221, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 221, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 231, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 231, "usage_type": "name"}, {"api_name": "roundware.rw.models.Project.objects.get", "line_number": 234, "usage_type": "call"}, {"api_name": "roundware.rw.models.Project", "line_number": 234, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 234, "usage_type": "name"}, {"api_name": "roundware.rw.models.Session.objects.get", "line_number": 236, "usage_type": "call"}, {"api_name": "roundware.rw.models.Session", "line_number": 236, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 236, "usage_type": "name"}, {"api_name": "roundwared.db.get_config_tag_json", "line_number": 237, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 237, "usage_type": "name"}, {"api_name": "roundware.rw.models.LocalizedString", "line_number": 266, "usage_type": "attribute"}, {"api_name": 
"roundware.rw.models", "line_number": 266, "usage_type": "name"}, {"api_name": "roundware.rw.models.Language.objects.get", "line_number": 273, "usage_type": "call"}, {"api_name": "roundware.rw.models.Language", "line_number": 273, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 273, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 280, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 280, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 282, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 282, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 297, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 297, "usage_type": "name"}, {"api_name": "roundware.rw.models.get_field_names_from_model", "line_number": 303, "usage_type": "call"}, {"api_name": "roundware.rw.models", "line_number": 303, "usage_type": "name"}, {"api_name": "roundware.rw.models.Asset", "line_number": 303, "usage_type": "attribute"}, {"api_name": "roundware.rw.models.Asset", "line_number": 304, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 304, "usage_type": "name"}, {"api_name": "roundware.rw.models.Language.objects.get", "line_number": 317, "usage_type": "call"}, {"api_name": "roundware.rw.models.Language", "line_number": 317, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 317, "usage_type": "name"}, {"api_name": "roundware.rw.models.Language", "line_number": 318, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 318, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 319, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 319, "usage_type": "name"}, {"api_name": "roundware.rw.models.Asset.objects.filter", "line_number": 328, "usage_type": "call"}, {"api_name": "roundware.rw.models.Asset", "line_number": 328, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 328, "usage_type": "name"}, {"api_name": "roundware.rw.models.Envelope.objects.filter", "line_number": 333, "usage_type": "call"}, {"api_name": "roundware.rw.models.Envelope", "line_number": 333, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 333, "usage_type": "name"}, {"api_name": "roundware.rw.models.Project.objects.get", "line_number": 342, "usage_type": "call"}, {"api_name": "roundware.rw.models.Project", "line_number": 342, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 342, "usage_type": "name"}, {"api_name": "roundware.rw.models.Asset.objects.filter", "line_number": 345, "usage_type": "call"}, {"api_name": "roundware.rw.models.Asset", "line_number": 345, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 345, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 365, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 365, "usage_type": "name"}, {"api_name": "roundwared.gpsmixer.distance_in_meters", "line_number": 370, "usage_type": "call"}, {"api_name": "roundwared.gpsmixer", "line_number": 370, "usage_type": "name"}, {"api_name": "roundware.settings.AUDIO_FILE_URI", "line_number": 392, "usage_type": "attribute"}, {"api_name": "roundware.settings", "line_number": 392, 
"usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 410, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 410, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 417, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 417, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 419, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 419, "usage_type": "name"}, {"api_name": "roundwared.db.log_event", "line_number": 420, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 420, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 435, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 435, "usage_type": "name"}, {"api_name": "roundware.rw.models.Session.objects.get", "line_number": 436, "usage_type": "call"}, {"api_name": "roundware.rw.models.Session", "line_number": 436, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 436, "usage_type": "name"}, {"api_name": "roundware.rw.models.Envelope", "line_number": 440, "usage_type": "call"}, {"api_name": "roundware.rw.models", "line_number": 440, "usage_type": "name"}, {"api_name": "roundware.rw.models.Asset.objects.get", "line_number": 462, "usage_type": "call"}, {"api_name": "roundware.rw.models.Asset", "line_number": 462, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 462, "usage_type": "name"}, {"api_name": "roundware.rw.models.Asset", "line_number": 463, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 463, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 464, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 464, "usage_type": "name"}, {"api_name": "roundware.rw.models.Envelope.objects.select_related", "line_number": 467, "usage_type": "call"}, {"api_name": "roundware.rw.models.Envelope", "line_number": 467, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 467, "usage_type": "name"}, {"api_name": "roundwared.db.log_event", "line_number": 470, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 470, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 482, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 484, "usage_type": "call"}, {"api_name": "os.path", "line_number": 484, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 485, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 486, "usage_type": "call"}, {"api_name": "os.path", "line_number": 486, "usage_type": "attribute"}, {"api_name": "roundwared.settings.config", "line_number": 486, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 486, "usage_type": "name"}, {"api_name": "roundwared.convertaudio.convert_uploaded_file", "line_number": 498, "usage_type": "call"}, {"api_name": "roundwared.convertaudio", "line_number": 498, "usage_type": "name"}, {"api_name": "roundware.rw.models.Tag.objects.filter", "line_number": 517, "usage_type": "call"}, {"api_name": "roundware.rw.models.Tag", "line_number": 517, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 517, "usage_type": "name"}, {"api_name": "roundware.rw.models.Asset", "line_number": 533, "usage_type": 
"call"}, {"api_name": "roundware.rw.models", "line_number": 533, "usage_type": "name"}, {"api_name": "roundwared.discover_audiolength.discover_and_set_audiolength", "line_number": 555, "usage_type": "call"}, {"api_name": "roundwared.discover_audiolength", "line_number": 555, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 560, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 560, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 562, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 562, "usage_type": "name"}, {"api_name": "roundwared.rounddbus.emit_stream_signal", "line_number": 563, "usage_type": "call"}, {"api_name": "roundwared.rounddbus", "line_number": 563, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 576, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 576, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 584, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 584, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 586, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 586, "usage_type": "name"}, {"api_name": "roundwared.db.log_event", "line_number": 587, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 587, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 590, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 590, "usage_type": "name"}, {"api_name": "roundware.rw.models.Session.objects.select_related", "line_number": 591, "usage_type": "call"}, {"api_name": "roundware.rw.models.Session", "line_number": 591, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 591, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 605, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 605, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 619, "usage_type": "call"}, {"api_name": "os.path", "line_number": 619, "usage_type": "attribute"}, {"api_name": "roundwared.settings.configdir", "line_number": 619, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 619, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 621, "usage_type": "call"}, {"api_name": "os.path", "line_number": 621, "usage_type": "attribute"}, {"api_name": "roundwared.settings.configdir", "line_number": 621, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 621, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 632, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 632, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 646, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 646, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 660, "usage_type": "call"}, {"api_name": "roundwared.db.log_event", "line_number": 661, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 661, "usage_type": "name"}, {"api_name": "roundware.rw.models.Session.objects.select_related", "line_number": 664, "usage_type": "call"}, {"api_name": "roundware.rw.models.Session", "line_number": 664, 
"usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 664, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 668, "usage_type": "call"}, {"api_name": "roundware.rw.models.Language.objects.filter", "line_number": 669, "usage_type": "call"}, {"api_name": "roundware.rw.models.Language", "line_number": 669, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 669, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 673, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 673, "usage_type": "name"}, {"api_name": "roundwared.rounddbus.emit_stream_signal", "line_number": 677, "usage_type": "call"}, {"api_name": "roundwared.rounddbus", "line_number": 677, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 693, "usage_type": "call"}, {"api_name": "roundwared.rounddbus.emit_stream_signal", "line_number": 694, "usage_type": "call"}, {"api_name": "roundwared.rounddbus", "line_number": 694, "usage_type": "name"}, {"api_name": "roundwared.rounddbus.emit_stream_signal", "line_number": 700, "usage_type": "call"}, {"api_name": "roundwared.rounddbus", "line_number": 700, "usage_type": "name"}, {"api_name": "roundwared.db.log_event", "line_number": 701, "usage_type": "call"}, {"api_name": "roundwared.db", "line_number": 701, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 714, "usage_type": "call"}, {"api_name": "os.devnull", "line_number": 715, "usage_type": "attribute"}, {"api_name": "os.devnull", "line_number": 716, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 717, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 729, "usage_type": "call"}, {"api_name": "roundwared.icecast2.Admin", "line_number": 730, "usage_type": "call"}, {"api_name": "roundwared.icecast2", "line_number": 730, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 730, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 730, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 731, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 731, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 732, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 732, "usage_type": "name"}, {"api_name": "roundwared.roundexception.RoundException", "line_number": 738, "usage_type": "call"}, {"api_name": "roundwared.roundexception", "line_number": 738, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 739, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 743, "usage_type": "call"}, {"api_name": "roundwared.icecast2.Admin", "line_number": 744, "usage_type": "call"}, {"api_name": "roundwared.icecast2", "line_number": 744, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 744, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 744, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 745, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 745, "usage_type": "name"}, {"api_name": "roundwared.settings.config", "line_number": 746, "usage_type": "attribute"}, {"api_name": "roundwared.settings", "line_number": 746, "usage_type": "name"}, {"api_name": "roundware.rw.models.Speaker.objects.filter", "line_number": 753, "usage_type": "call"}, 
{"api_name": "roundware.rw.models.Speaker", "line_number": 753, "usage_type": "attribute"}, {"api_name": "roundware.rw.models", "line_number": 753, "usage_type": "name"}, {"api_name": "roundwared.gpsmixer.distance_in_meters", "line_number": 757, "usage_type": "call"}, {"api_name": "roundwared.gpsmixer", "line_number": 757, "usage_type": "name"}]} +{"seq_id": "194613050", "text": "import os\nimport time\nimport json\nfrom PyQt5.QtCore import QTimer\n\nclass Settings(dict):\n \"\"\"\n A subclass of dictionary which will automatically handle saving\n settings and loading settings\n \"\"\"\n saveSettingsTimer = QTimer()\n def __init__(self, *args, **kwargs):\n self.skipKeys = []\n\n self.acceptableFileAge = 30 * 60 #30 min default\n self.filePath = None\n self.saveInterval = 10 * 60 * 1000 # 10 minute default\n self.saveSettingsTimer.timeout.connect(self.saveSettings)\n self.saveSettingsTimer.setInterval(self.saveInterval)\n self.saveSettingsTimer.start()\n super(Settings, self).__init__(*args, **kwargs)\n\n def __setitem__(self, key, value):\n super(Settings, self).__setitem__(key, value)\n self.saveSettings()\n\n def setSavingInterval(self, interval=10*60*1000):\n self.saveInterval = interval\n self.saveSettingsTimer.setInterval(self.saveInterval)\n\n def saveSettings(self):\n if self.filePath is None: return\n try:\n saveDict = {k: v for k, v in self.items() if k not in self.skipKeys}\n with open(self.filePath, 'w') as fh:\n json.dump(saveDict, fh, separators=(',', ': '),\n sort_keys=True, indent=4, default=lambda x: 'NotSerial')\n except AttributeError:\n pass\n\n def checkFile(self):\n \"\"\"\n This will check to see wheteher there's a previous settings file,\n and if it's recent enough that it should be loaded\n :return:\n \"\"\"\n if not os.path.isfile(self.filePath):\n # File doesn't exist\n return False\n if (time.time() - os.path.getmtime(self.filePath)) > self.acceptableFileAge:\n # It's been longer than 30 minutes and likely isn't worth\n # keeping open\n return False\n return True\n\n def loadSettings(self):\n if not self.checkFile(): return False\n\n with open(self.filePath, 'r') as fh:\n savedDict = json.load(fh)\n self.update(savedDict)\n return True", "sub_path": "InstsAndQt/SettingsDict.py", "file_name": "SettingsDict.py", "file_ext": "py", "file_size_in_byte": 2105, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "PyQt5.QtCore.QTimer", "line_number": 11, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.getmtime", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "452746134", "text": "from sklearn import tree # to do DTs\nfrom sklearn.datasets import fetch_mldata # to import data\n\niris = fetch_mldata('iris', data_home='.') # read in the iris dataset\n\ndt = tree.DecisionTreeClassifier(criterion='entropy', # set classifier, use Entropy\n\t\t\t\tmax_depth=3) #set max depth to 3\n# I decided to test reducing max_depth and also setting min samples per leaves because both could be potential methods of reducing overfitting, and simplifying the tree.\n\nmodel = dt.fit(iris.data, iris.target) # 
construct a tree\n\nwith open(\"irisA.dot\", 'w') as f:\n f = tree.export_graphviz(model, out_file=f) #creates a .dot file of tree\n\n\n#When the maximum depth was set to three, the tree obviously became simpler and a little shorter. However, at many of the leaves at the end, the tree did not completely determine which type the iris was. However, although it did not work as well on the training data given, there is a chance it would work better on test data. Either way, this reduced overfitting with the cost of it not being 100% accurate in all situations.\n", "sub_path": "gans.lab4/part2a.py", "file_name": "part2a.py", "file_ext": "py", "file_size_in_byte": 1121, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sklearn.datasets.fetch_mldata", "line_number": 4, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 6, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 6, "usage_type": "name"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "593274750", "text": "#!/usr/bin/python3\nimport random\nimport sys\nimport time\nimport traceback\nfrom math import trunc\n\nimport chess\nimport chess.pgn\n\n\ndef place_kings(brd):\n while True:\n rank_white, file_white, rank_black, file_black = random.randint(0,7), random.randint(0,7), random.randint(0,7), random.randint(0,7)\n diff_list = [abs(rank_white - rank_black), abs(file_white - file_black)]\n if sum(diff_list) > 2 or set(diff_list) == {0, 2}:\n brd[rank_white][file_white], brd[rank_black][file_black] = \"K\", \"k\"\n break\n \ndef populate_board(brd, wp, bp, white_pieces, black_pieces):\n for pieces, num in zip((white_pieces, black_pieces), (wp, bp)):\n for piece in random.sample(pieces, num):\n row, col = random.randint(0, 7), random.randint(0, 7)\n while brd[row][col] != \" \" or pawn_on_promotion_square(piece, row):\n row, col = random.randint(0, 7), random.randint(0, 7)\n brd[row][col] = piece\n \ndef pawn_on_promotion_square(pc, pr):\n if pc == \"P\" and pr == 0:\n return True\n elif pc == \"p\" and pr == 7:\n return True\n return False\n \ndef fen_from_board(brd):\n fen = \"\"\n for x in brd:\n n = 0\n for y in x:\n if y == \" \":\n n += 1\n else:\n if n != 0:\n fen += str(n)\n fen += y\n n = 0\n if n != 0:\n fen += str(n)\n fen += \"/\" if fen.count(\"/\") < 7 else \"\"\n fen += \" w - - 0 1\\n\"\n return fen\n\ndef gen_board(piece_num = 6, pieces = ''):\n board = [[\" \" for x in range(8)] for y in range(8)]\n place_kings(board)\n if pieces == '':\n white_num = random.randint(0, piece_num - 2)\n black_num = piece_num - 2 - white_num\n white_pieces = [\"R\", \"N\", \"B\", \"Q\", \"P\"]*1000\n black_pieces = [\"r\", \"n\", \"b\", \"q\", \"p\"]*1000\n populate_board(board, \n white_num, \n black_num, \n white_pieces, \n black_pieces)\n else:\n white_pieces, black_pieces = pieces.split('v')\n white_pieces = white_pieces[1:]\n black_pieces = black_pieces[1:].lower()\n populate_board(board, \n len(white_pieces), \n len(black_pieces), \n white_pieces, \n black_pieces)\n return board\n\ndef main():\n board = chess.Board(fen=None)\n with sys.stdout as out:\n \n for i in range(55000):\n #if not i%1000:\n # print(i)\n wcount = 1 + (i%4)\n bcount = 4 - (i%4)\n \n wp = 'K'\n wp += ''.join(random.choices('QRBNP', k=wcount))\n bp = 'k'\n bp += ''.join(random.choices('rbnqp', k=bcount))\n fen = 
fen_from_board(gen_board(pieces=wp+'v'+bp))\n board.set_fen(fen)\n if not board.is_valid() or board.is_game_over(claim_draw=True):\n continue\n\n out.write(board.epd())\n out.write(\"\\n\")\n out.flush()\nmain()\n\n", "sub_path": "dodgy/random7p.py", "file_name": "random7p.py", "file_ext": "py", "file_size_in_byte": 3126, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "random.randint", "line_number": 14, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 22, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 25, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "chess.Board", "line_number": 78, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 79, "usage_type": "attribute"}, {"api_name": "random.choices", "line_number": 88, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "264693532", "text": "# coding: utf-8\n#\n# Copyright 2015 Palantir Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains the Werkzeug server for debugging and WSGI compatibility.\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nfrom threading import Lock\n\nfrom werkzeug.debug import DebuggedApplication\nfrom werkzeug.exceptions import abort\nfrom werkzeug.local import Local, LocalManager, LocalProxy\nfrom werkzeug.routing import Map, Rule\nfrom werkzeug.serving import run_simple\nfrom werkzeug.wrappers import Request, Response\n\nfrom .errors import get_status_code_from_error_code\n\n__all__ = [\"Server\", \"DebuggedJsonRpcApplication\", \"current_request\"]\n\n\nDEFAULT_API_ENDPOINT_NAME = \"/api\"\n\n_CURRENT_REQUEST_KEY = \"current_request\"\n_local = Local() # pylint: disable=invalid-name\n_LOCAL_MANAGER = LocalManager([_local])\n\ncurrent_request = LocalProxy(_local, _CURRENT_REQUEST_KEY) # pylint: disable=invalid-name\n\"\"\"A thread-local which stores the current request object when dispatching requests for\n:class:`Server`.\n\nStores a :class:`werkzeug.wrappers.Request`.\n\n.. versionadded:: 0.2.0\n\"\"\"\n\n\nclass Server(object):\n \"\"\"A basic WSGI-compatible server for typedjsonrpc endpoints.\n\n :attribute registry: The registry for this server\n :type registry: typedjsonrpc.registry.Registry\n\n .. versionadded:: 0.1.0\n .. versionchanged:: 0.4.0 Now returns HTTP status codes\n \"\"\"\n\n def __init__(self, registry, endpoint=DEFAULT_API_ENDPOINT_NAME):\n \"\"\"\n :param registry: The JSON-RPC registry to use\n :type registry: typedjsonrpc.registry.Registry\n :param endpoint: The endpoint to publish JSON-RPC endpoints. 
Default \"/api\".\n :type endpoint: str\n \"\"\"\n self.registry = registry\n self._endpoint = endpoint\n self._url_map = Map([Rule(endpoint, endpoint=self._endpoint)])\n\n self._before_first_request_funcs = []\n\n self._after_first_request_handled = False\n self._before_first_request_lock = Lock()\n\n def _dispatch_request(self, request):\n self._try_trigger_before_first_request_funcs()\n adapter = self._url_map.bind_to_environ(request.environ)\n endpoint, _ = adapter.match()\n if endpoint == self._endpoint:\n return self._dispatch_jsonrpc_request(request)\n else:\n abort(404)\n\n def _dispatch_jsonrpc_request(self, request):\n json_output = self.registry.dispatch(request)\n if json_output is None:\n return Response(status=204)\n return Response(json_output,\n mimetype=\"application/json\",\n status=self._determine_status_code(json_output))\n\n def _determine_status_code(self, json_output):\n output = self.registry.json_decoder.decode(json_output)\n if isinstance(output, list) or \"result\" in output:\n return 200\n else:\n assert \"error\" in output, \"JSON-RPC is malformed and doesn't contain result or error\"\n return get_status_code_from_error_code(output[\"error\"][\"code\"])\n\n def wsgi_app(self, environ, start_response):\n \"\"\"A basic WSGI app\"\"\"\n @_LOCAL_MANAGER.middleware\n def _wrapped_app(environ, start_response):\n request = Request(environ)\n setattr(_local, _CURRENT_REQUEST_KEY, request)\n response = self._dispatch_request(request)\n return response(environ, start_response)\n return _wrapped_app(environ, start_response)\n\n def __call__(self, environ, start_response):\n return self.wsgi_app(environ, start_response)\n\n def run(self, host, port, **options):\n \"\"\"For debugging purposes, you can run this as a standalone server.\n\n .. WARNING:: **Security vulnerability**\n\n This uses :class:`DebuggedJsonRpcApplication` to assist debugging. If you want to use\n this in production, you should run :class:`Server` as a standard WSGI app with\n `uWSGI `_ or another similar WSGI server.\n\n .. versionadded:: 0.1.0\n \"\"\"\n self.registry.debug = True\n debugged = DebuggedJsonRpcApplication(self, evalex=True)\n run_simple(host, port, debugged, use_reloader=True, **options)\n\n def _try_trigger_before_first_request_funcs(self): # pylint: disable=C0103\n \"\"\"Runs each function from ``self.before_first_request_funcs`` once and only once.\"\"\"\n if self._after_first_request_handled:\n return\n else:\n with self._before_first_request_lock:\n if self._after_first_request_handled:\n return\n for func in self._before_first_request_funcs:\n func()\n self._after_first_request_handled = True\n\n def register_before_first_request(self, func):\n \"\"\"Registers a function to be called once before the first served request.\n\n :param func: Function called\n :type func: () -> object\n\n .. versionadded:: 0.1.0\n \"\"\"\n self._before_first_request_funcs.append(func)\n\n\nclass DebuggedJsonRpcApplication(DebuggedApplication):\n \"\"\"A JSON-RPC-specific debugged application.\n\n This differs from DebuggedApplication since the normal debugger assumes you\n are hitting the endpoint from a web browser.\n\n A returned response will be JSON of the form: ``{\"traceback_id\": }`` which\n you can use to hit the endpoint ``http://:/debug/``.\n\n .. versionadded:: 0.1.0\n\n .. 
WARNING:: **Security vulnerability**\n\n This should never be used in production because users have arbitrary shell\n access in debug mode.\n \"\"\"\n def __init__(self, app, **kwargs):\n \"\"\"\n :param app: The wsgi application to be debugged\n :type app: typedjsonrpc.server.Server\n :param kwargs: The arguments to pass to the DebuggedApplication\n \"\"\"\n super(DebuggedJsonRpcApplication, self).__init__(app, **kwargs)\n self._debug_map = Map([Rule(\"/debug/\", endpoint=\"debug\")])\n\n def debug_application(self, environ, start_response):\n \"\"\"Run the application and preserve the traceback frames.\n\n :param environ: The environment which is passed into the wsgi application\n :type environ: dict[str, object]\n :param start_response: The start_response function of the wsgi application\n :type start_response: (str, list[(str, str)]) -> None\n :rtype: generator[str]\n\n .. versionadded:: 0.1.0\n \"\"\"\n adapter = self._debug_map.bind_to_environ(environ)\n if adapter.test():\n _, args = adapter.match()\n return self.handle_debug(environ, start_response, args[\"traceback_id\"])\n else:\n return super(DebuggedJsonRpcApplication, self).debug_application(environ,\n start_response)\n\n def handle_debug(self, environ, start_response, traceback_id):\n \"\"\"Handles the debug endpoint for inspecting previous errors.\n\n :param environ: The environment which is passed into the wsgi application\n :type environ: dict[str, object]\n :param start_response: The start_response function of the wsgi application\n :type start_response: (str, list[(str, str)]) -> NoneType\n :param traceback_id: The id of the traceback to inspect\n :type traceback_id: int\n\n .. versionadded:: 0.1.0\n \"\"\"\n if traceback_id not in self.app.registry.tracebacks:\n abort(404)\n self._copy_over_traceback(traceback_id)\n traceback = self.tracebacks[traceback_id]\n rendered = traceback.render_full(evalex=self.evalex, secret=self.secret)\n response = Response(rendered.encode('utf-8', 'replace'),\n headers=[('Content-Type', 'text/html; charset=utf-8'),\n ('X-XSS-Protection', '0')])\n return response(environ, start_response)\n\n def _copy_over_traceback(self, traceback_id):\n if traceback_id not in self.tracebacks:\n traceback = self.app.registry.tracebacks[traceback_id]\n self.tracebacks[traceback_id] = traceback\n for frame in traceback.frames:\n self.frames[frame.id] = frame\n", "sub_path": "typedjsonrpc/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 8860, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "werkzeug.local.Local", "line_number": 37, "usage_type": "call"}, {"api_name": "werkzeug.local.LocalManager", "line_number": 38, "usage_type": "call"}, {"api_name": "werkzeug.local.LocalProxy", "line_number": 40, "usage_type": "call"}, {"api_name": "werkzeug.routing.Map", "line_number": 69, "usage_type": "call"}, {"api_name": "werkzeug.routing.Rule", "line_number": 69, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 74, "usage_type": "call"}, {"api_name": "werkzeug.exceptions.abort", "line_number": 83, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.Response", "line_number": 88, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.Response", "line_number": 89, "usage_type": "call"}, {"api_name": "errors.get_status_code_from_error_code", "line_number": 99, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.Request", "line_number": 105, "usage_type": "call"}, {"api_name": 
"werkzeug.serving.run_simple", "line_number": 127, "usage_type": "call"}, {"api_name": "werkzeug.debug.DebuggedApplication", "line_number": 152, "usage_type": "name"}, {"api_name": "werkzeug.routing.Map", "line_number": 175, "usage_type": "call"}, {"api_name": "werkzeug.routing.Rule", "line_number": 175, "usage_type": "call"}, {"api_name": "werkzeug.exceptions.abort", "line_number": 209, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.Response", "line_number": 213, "usage_type": "call"}]} +{"seq_id": "82918639", "text": "import numpy as np\nimport scipy.signal\nimport PIL.Image, PIL.ImageDraw, PIL.ImageFilter, PIL.ImageFont, PIL.ImageOps\nfrom .utilities import _coord_circle\nfrom .rescale import rescale\n\ndef image_blobs(width=500, height=500, n=100, sd=8):\n \"\"\"Return an image with blobs of the same standard deviations (SD).\n\n >>> import pyllusion as ill\n >>>\n >>> ill.image_blobs(n=500) #doctest: +ELLIPSIS\n \n\n \"\"\"\n\n array = np.zeros((height, width))\n for _ in range(n):\n x = np.random.randint(width)\n y = np.random.randint(height)\n blob = _image_blob(x=x, y=y, width=width, height=height, sd=sd)\n array += blob\n\n array = rescale(array, to=[0, 255])\n image = PIL.Image.fromarray(array.astype(np.uint8))\n return image\n\n\ndef image_blob(x=450, y=100, width=800, height=600, sd=3):\n \"\"\"Return an image of blob\n \"\"\"\n array = _image_blob(x=x, y=y, width=width, height=height, sd=sd)\n array = rescale(array, to=[0, 255])\n image = PIL.Image.fromarray(array.astype(np.uint8))\n return image\n\n\ndef _image_blob(x=450, y=100, width=800, height=600, sd=3):\n \"\"\"Returns a 2D Gaussian kernel.\n\n >>> import pyllusion as ill\n >>> import matplotlib.pyplot as plt\n >>> array = _image_blob(sd=8)\n >>> plt.imshow(array) #doctest: +ELLIPSIS\n <...>\n \"\"\"\n\n _x = height - x\n _y = width - y\n parent_width = 3 * (np.max([x, y, _x, _y]))\n gkern1d = scipy.signal.gaussian(parent_width, std=sd).reshape(parent_width, 1)\n parent_blob = np.outer(gkern1d, gkern1d)\n\n w = int(parent_width / 2)\n blob = parent_blob[w - y: (w - y) + height, w - x: (w - x) + width]\n return blob\n\n\ndef _draw_blob(width, height=None, size=0.1, blur=0, color=\"black\"):\n # Retrieve dimensions\n if height is None:\n width, height = width\n elif isinstance(width, PIL.Image.Image):\n width, height = width.size\n\n # Create mask of image size\n blob = PIL.Image.new(\"RGBA\", (width, height))\n\n # Blob coordinates\n coord = _coord_circle(blob,\n diameter=size,\n x=np.random.uniform(-1, 1),\n y=np.random.uniform(-1, 1))\n\n # Draw blob\n draw = PIL.ImageDraw.Draw(blob)\n draw.ellipse(coord, fill=color)\n\n blob = blob.filter(PIL.ImageFilter.GaussianBlur(radius=blur * 0.01 * width))\n return blob\n", "sub_path": "build/lib/pyllusion/image/image_blobs.py", "file_name": "image_blobs.py", "file_ext": "py", "file_size_in_byte": 2383, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 20, "usage_type": "attribute"}, {"api_name": "rescale.rescale", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.Image.Image", 
"line_number": 25, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rescale.rescale", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 34, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.signal.signal.gaussian", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.signal.signal", "line_number": 51, "usage_type": "attribute"}, {"api_name": "scipy.signal", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.outer", "line_number": 52, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 63, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 63, "usage_type": "name"}, {"api_name": "PIL.Image.Image.new", "line_number": 67, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 67, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 67, "usage_type": "name"}, {"api_name": "utilities._coord_circle", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 73, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageDraw.Draw", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image.ImageDraw", "line_number": 76, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 76, "usage_type": "name"}, {"api_name": "PIL.Image.ImageFilter.GaussianBlur", "line_number": 79, "usage_type": "call"}, {"api_name": "PIL.Image.ImageFilter", "line_number": 79, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "545641779", "text": "# P 2-1\nfrom collections import defaultdict\n\nmemory = defaultdict(int)\npc = 0\n\nmemory.update({i: int(n) for i, n in enumerate(open('2-1.txt', 'r').readline().split(','))})\n\nmemory[1] = 12\nmemory[2] = 2\nwhile memory[pc] != 99:\n op, l1, l2, r = memory[pc], memory[pc + 1], memory[pc + 2], memory[pc +3]\n if op == 1:\n memory[r] = memory[l1] + memory[l2]\n elif op == 2:\n memory[r] = memory[l1] * memory[l2]\n else:\n print('Error at pc={}'.format(pc))\n print('Memory Dump:')\n print(memory)\n break\n pc += 4\n\nprint(memory)\nprint(memory[0])\n", "sub_path": "2-1.py", "file_name": "2-1.py", "file_ext": "py", "file_size_in_byte": 589, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "collections.defaultdict", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "266680538", "text": "###########################################################################################\n# -- OPTIMIZA\n# -- DESCRIPCION: FUNCION PARA OBTENER EN NUMERO FLOAT CONVERTIDO EN HORAS\n# -- AUTOR: JOSE LUIS CONDORI JARA\n# -- CAMBIOS: NUMERO FECHA (DD/MM/YYYY) PERSONA CAMBIOS EFECTUADOS\n# -- 00001 13/12/2019 JOSE CONDORI OBSERVACIONES YENNI - PLEs\n# -- 00002 16/12/2019 JOSE CONDORI OBSERVACIONES YENNI - PLEs\n# 
-----------------------------------------------------------------------------------------\nfrom odoo import models, fields, api\nfrom odoo.exceptions import ValidationError\nimport base64\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass AccountInvoiceConfirm(models.TransientModel):\n _name = \"account.bill.txt\"\n _description = \"Generate TXT\"\n\n state = fields.Selection([('choose', 'choose'), ('get', 'get')], default='choose')\n txt_filename = fields.Char('filename', readonly=True)\n txt_binary = fields.Binary('file', readonly=True)\n\n # Parametros\n date_month = fields.Selection(string=\"Mes\", selection=[('01', 'Enero'),\n ('02', 'Febrero'),\n ('03', 'Marzo'),\n ('04', 'Abril'),\n ('05', 'Mayo'),\n ('06', 'Junio'),\n ('07', 'Julio'),\n ('08', 'Agosto'),\n ('09', 'Septiembre'),\n ('10', 'Octubre'),\n ('11', 'Noviembre'),\n ('12', 'Diciembre')],\n default=lambda s: fields.Date.context_today(s).strftime(\"%m\"))\n date_year = fields.Char(string=\"Año\", size=4, default=lambda s: fields.Date.context_today(s).strftime(\"%Y\"))\n type = fields.Selection(string=\"Reporte de\", selection=[('out_invoice', 'Ventas'), ('in_invoice', 'Compras')])\n company_id = fields.Many2one('res.company', string='Compañia',\n default=lambda self: self._context.get('company_id', self.env.user.company_id.id))\n\n @api.multi\n def generate_file(self):\n\n if self.type == \"out_invoice\":\n type_doc = ['out_invoice', 'out_refund']\n elif self.type == \"in_invoice\":\n type_doc = ['in_invoice', 'in_refund']\n else:\n raise ValidationError(\"No se encontraton facturas\")\n\n dominio = [('type', 'in', type_doc),\n ('state', 'not in', ['draft']),\n ('type_operation', 'not in', ['4', '5']),\n ('month_year_inv', 'like', self.date_month + \"\" + self.date_year),\n ('company_id', '=', self.company_id.id)]\n\n # if self.type == \"out_invoice\":\n # dominio = expression.AND([dominio, [('type_operation', 'in', ['1', '2', '3'])]])\n\n # 00002 - Inicio\n invoice_obj = self.env['account.invoice']\n invoice_ids = invoice_obj.search(dominio, order=\"id asc\")\n # 00002 - Fin\n\n # if len(invoice_ids) == 0:\n # raise ValidationError(\"No se encontraton facturas\")\n\n content = \"\"\n # Proveedores / Compras\n if self.type in \"in_invoice,in_refund\":\n ruc = self.env.user.company_id.vat\n periodo = self.date_year + \"\" + self.date_month\n cantidad = '0'\n for inv in invoice_ids:\n\n # 00002 - Inicio\n # 14 Base imponible - VTAS GRAV. Y/O DE EXP\n campo_14 = \"\"\n if inv.type_operation == \"1\":\n if inv.currency_id != inv.company_id.currency_id:\n # campo_14 = inv.company_id.currency_id.round(inv.amount_untaxed * inv.exchange_rate)\n campo_14 = inv.company_id.currency_id.round(inv.total_base_igv * inv.exchange_rate)\n else:\n # campo_14 = inv.amount_untaxed\n campo_14 = inv.total_base_igv\n\n # 15 Impuesto - VTAS GRAV. Y/O DE EXP\n campo_15 = \"\"\n if inv.type_operation == \"1\":\n if inv.currency_id != inv.company_id.currency_id:\n campo_15 = inv.company_id.currency_id.round(inv.total_igv * inv.exchange_rate)\n else:\n campo_15 = inv.total_igv\n\n # 16 Base imponible - VTAS GRAV. Y/O DE EXP.Y A OP. NO GRAV.\n campo_16 = \"\"\n if inv.type_operation == \"2\":\n if inv.currency_id != inv.company_id.currency_id:\n # campo_16 = inv.company_id.currency_id.round(inv.amount_untaxed * inv.exchange_rate)\n campo_16 = inv.company_id.currency_id.round(inv.total_base_igv * inv.exchange_rate)\n else:\n # campo_16 = inv.amount_untaxed\n campo_16 = inv.total_base_igv\n\n # 17 Impuesto - VTAS GRAV. Y/O DE EXP.Y A OP. 
NO GRAV.\n campo_17 = \"\"\n if inv.type_operation == \"2\":\n if inv.currency_id != inv.company_id.currency_id:\n # campo_17 = inv.company_id.currency_id.round(inv.amount_tax * inv.exchange_rate)\n campo_17 = inv.company_id.currency_id.round(inv.total_igv * inv.exchange_rate)\n else:\n # campo_17 = inv.amount_tax\n campo_17 = inv.total_igv\n\n # 18 Base imponible - VTAS NO GRAV.\n campo_18 = \"\"\n if inv.type_operation == \"3\":\n if inv.currency_id != inv.company_id.currency_id:\n # campo_18 = inv.company_id.currency_id.round(inv.amount_untaxed * inv.exchange_rate)\n campo_18 = inv.company_id.currency_id.round(inv.total_base_igv * inv.exchange_rate)\n else:\n # campo_18 = inv.amount_untaxed\n campo_18 = inv.total_base_igv\n\n # 19 Impuesto - VTAS NO GRAV.\n campo_19 = \"\"\n if inv.type_operation == \"3\":\n if inv.currency_id != inv.company_id.currency_id:\n # campo_19 = inv.company_id.currency_id.round(inv.amount_tax * inv.exchange_rate)\n campo_19 = inv.company_id.currency_id.round(inv.total_igv * inv.exchange_rate)\n else:\n # campo_19 = inv.amount_tax\n campo_19 = inv.total_igv\n # 00002 - Fin\n\n # 20 Total Adeudado\n if inv.currency_id != inv.company_id.currency_id:\n campo_20 = inv.company_id.currency_id.round(inv.total_no_gravado * inv.exchange_rate)\n else:\n campo_20 = inv.total_no_gravado\n\n # 21 Impuesto\n if inv.currency_id != inv.company_id.currency_id:\n campo_21 = inv.company_id.currency_id.round(inv.total_isc * inv.exchange_rate)\n else:\n campo_21 = inv.total_isc\n\n # 22 Otros de las Lineas\n if inv.currency_id != inv.company_id.currency_id:\n campo_22 = inv.company_id.currency_id.round(inv.total_otros * inv.exchange_rate)\n else:\n campo_22 = inv.total_otros\n\n # 23 Total\n if inv.currency_id != inv.company_id.currency_id:\n campo_23 = inv.company_id.currency_id.round(inv.amount_total * inv.exchange_rate)\n else:\n campo_23 = inv.amount_total\n\n # 33 -> Tipo de Pago\n campo_33 = \"\"\n if inv.retencion == \"ARET\":\n campo_33 = \"ARET(Detección Automática Retención)\"\n if inv.retencion == \"SRET\":\n campo_33 = \"SRET(Siempre Retención)\"\n\n # 41 ->\n campo_41 = \"0\"\n if inv.total_igv > 0:\n if inv.date_invoice and inv.date:\n if inv.date.strftime(\"%m\") == inv.date_invoice.strftime(\"%m\"):\n campo_41 = \"1\"\n if int(inv.date.strftime(\"%m\")) > int(inv.date_invoice.strftime(\"%m\")):\n campo_41 = \"6\"\n\n if inv.type == 'in_refund':\n campo_14 = campo_14 * -1\n campo_15 = campo_15 * -1\n campo_16 = campo_16 * -1\n campo_17 = campo_17 * -1\n campo_18 = campo_18 * -1\n campo_19 = campo_19 * -1\n campo_20 = campo_20 * -1\n campo_21 = campo_21 * -1\n campo_22 = campo_22 * -1\n campo_23 = campo_23 * -1\n\n txt_line = \"%s|%s|M%s|%s|%s|%s|%s|%s|%s||%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%.3f|%s|%s|%s|\" \\\n \"%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\" % (\n # inv.move_id.date.strftime(\"%Y%m\") if inv.move_id.date else '', # Periodo Asiento -> 1\n (periodo + \"00\") or '', # Periodo Asiento -> 1\n inv.move_id.name.replace(\"/\", \"\") or '', # Correlativo de Factura -> 2\n str(inv.move_id.id).zfill(4) or '',\n # Correlativo de todos los asientos no solo facturas -> 3\n inv.date.strftime(\"%d/%m/%Y\") if inv.date else '', # Fecha de la Factura -> 4\n inv.date_due.strftime(\"%d/%m/%Y\") \\\n if inv.date_due else '01/01/0001', # Fecha Vencimiento -> 5\n inv.document_type_id.code or '', # N° del Tipo de Documento -> 6\n str(inv.invoice_serie if inv.invoice_serie else 0).zfill(4),\n # Numero de la Factura -> 7\n inv.year_emission_dua or '', # Año de emision del 
DUA -> 8\n str(inv.invoice_number and inv.invoice_number or 0).zfill(8) or '', # Numero -> 9\n # Omitido -> 10\n # N° Tipo de Documento Identidad -> 11\n inv.partner_id.catalog_06_id.code or '',\n inv.partner_id.vat or '', # N° de Documento de Identidad -> 12\n inv.partner_id.name or '', # Nombre del Proveedor -> 13\n \"{0:.2f}\".format(campo_14 if campo_14 else 0.00), # Base imponible -> 14\n \"{0:.2f}\".format(campo_15 if campo_15 else 0.00), # Total -> 15\n \"{0:.2f}\".format(campo_16 if campo_16 else 0.00), # Base imponible -> 16\n \"{0:.2f}\".format(campo_17 if campo_17 else 0.00), # Impuesto -> 17\n \"{0:.2f}\".format(campo_18 if campo_18 else 0.00), # Base imponible -> 18\n \"{0:.2f}\".format(campo_19 if campo_19 else 0.00), # Impuesto -> 19\n \"{0:.2f}\".format(campo_20 if campo_20 else 0.00), # Total Adeudado -> 20\n \"{0:.2f}\".format(campo_21 if campo_21 else 0.00), # Impuesto -> 21\n \"{0:.2f}\".format(campo_22 if campo_22 else 0.00), # Otros de las Lineas -> 22\n \"{0:.2f}\".format(campo_23 if campo_23 else 0.00), # Total -> 23\n inv.currency_id.name or '', # Tipo de moneda -> 24\n inv.exchange_rate or 1.000, # Tipo de Cambio-> 25\n inv.refund_invoice_date_invoice.strftime(\"%d/%m/%Y\") \\\n if inv.refund_invoice_date_invoice else '', # Fecha documento que modifica -> 26\n inv.refund_invoice_document_type_id.code or '', # Tipo documento que modifica -> 27\n inv.refund_invoice_sunat_serie or '', # Serie del documento que modifica -> 28\n inv.refund_invoice_code_dua.code or '', # Codigo DUA -> 29\n inv.refund_invoice_sunat_number or '', # Numero DUA -> 30\n inv.date_detraction.strftime(\"%d/%m/%Y\") \\\n if inv.date_detraction else '', # Fecha de Detracciones -> 31\n inv.num_detraction or '', # Numero de Detracciones -> 32\n campo_33 or '', # Marca de Comprobante -> 33\n inv.classifier_good.code or '', # Clasificador de Bienes -> 34\n '', # -> 35\n '', # -> 36\n '', # -> 37\n '', # -> 38\n '', # -> 39\n \"1\" if inv.state == 'paid' else \"\", # -> 40\n campo_41 or '', # -> 41\n )\n content = content + \"\" + txt_line + \"\\r\\n\"\n if cantidad == '0':\n cantidad = '1'\n if cantidad == '0':\n content = \"\\r\\n\"\n self.write({\n 'state': 'get',\n 'txt_binary': base64.b64encode(content.encode('ISO-8859-1')),\n # 'txt_filename': \"compras.txt\"\n 'txt_filename': \"LE\" + (\n ruc if ruc else \"00000000000\") + \"\" + periodo + \"00080100001\" + cantidad + \"11.txt\"\n })\n # Clientes / Ventas\n if self.type in \"out_invoice,out_refund\":\n ruc = self.env.user.company_id.vat\n periodo = self.date_year + \"\" + self.date_month\n cantidad = '0'\n for inv in invoice_ids:\n\n # 08 -> Numero de Documento\n campo_08 = \"\"\n if inv.sunat_number:\n # 00002 - Inicio\n # temp_num = int(inv.sunat_number)\n # if inv.document_type_id.code in '01,03,07,08':\n # campo_08 = str(temp_num).zfill(4)\n # elif inv.document_type_id.code in '12':\n # campo_08 = str(temp_num).zfill(20)\n campo_08 = inv.sunat_number\n # 00002 - Fin\n\n # 34 -> Fechas\n # 00002 - Inicio\n codigo_34 = '1'\n # if inv.total_igv > 0:\n # codigo_34 = '1'\n # elif inv.total_igv <= 0:\n # codigo_34 = '0'\n if inv.state == 'cancel':\n codigo_34 = '2'\n else:\n invoice_refund = invoice_obj.search([('state', '!=', 'draft'),\n ('type', '=', 'out_refund'),\n ('refund_invoice_id', '=', inv.id)])\n if invoice_refund:\n if inv.reconciled and invoice_refund.reconciled:\n codigo_34 = '2'\n # 00002 - Fin\n\n # 05 -> Fecha de Vencimiento o Fecha de Pago (1)\n campo_05 = '01/01/0001'\n if inv.document_type_id.code == '06' and codigo_34 != 
'2':\n campo_05 = inv.date_due and inv.date_due.strftime(\"%d/%m/%Y\") or ''\n\n # 00001 - Inicio\n # 13 -> Factura de Exportacion\n if inv.currency_id != inv.company_id.currency_id:\n campo_13 = inv.company_id.currency_id.round(inv.inv_fac_exp * inv.exchange_rate)\n else:\n campo_13 = inv.inv_fac_exp\n\n # 14 -> Base imponible\n if inv.currency_id != inv.company_id.currency_id:\n # campo_14 = inv.company_id.currency_id.round(inv.amount_untaxed * inv.exchange_rate)\n campo_14 = inv.company_id.currency_id.round(inv.total_base_igv * inv.exchange_rate)\n # campo_14 = inv.currency_id._convert(inv.amount_untaxed,\n # inv.company_id.currency_id,\n # inv.company_id,\n # inv.date or fields.Date.today())\n else:\n # campo_14 = inv.total_igv\n campo_14 = inv.total_base_igv\n\n # 16 -> Impuesto General\n if inv.currency_id != inv.company_id.currency_id:\n # campo_16 = inv.company_id.currency_id.round(inv.amount_tax * inv.exchange_rate)\n campo_16 = inv.company_id.currency_id.round(inv.total_igv * inv.exchange_rate)\n else:\n # campo_16 = inv.amount_tax\n campo_16 = inv.total_igv\n # 00001 - Fin\n\n # 18 -> Impuesto General\n if inv.currency_id != inv.company_id.currency_id:\n campo_18 = inv.company_id.currency_id.round(inv.total_exonerado * inv.exchange_rate)\n else:\n campo_18 = inv.total_exonerado\n\n # 19 -> Impuesto General\n if inv.currency_id != inv.company_id.currency_id:\n campo_19 = inv.company_id.currency_id.round(inv.total_inafecto * inv.exchange_rate)\n else:\n campo_19 = inv.total_inafecto\n\n # 20 -> Impuesto General\n if inv.currency_id != inv.company_id.currency_id:\n campo_20 = inv.company_id.currency_id.round(inv.total_isc * inv.exchange_rate)\n else:\n campo_20 = inv.total_isc\n\n # 00001 - Inicio\n # 23 -> Impuesto General\n if inv.currency_id != inv.company_id.currency_id:\n # campo_23 = inv.company_id.currency_id.round(inv.inv_otros * inv.exchange_rate)\n campo_23 = inv.company_id.currency_id.round(inv.total_otros * inv.exchange_rate)\n else:\n # campo_23 = inv.inv_otros\n campo_23 = inv.total_otros\n # 00001 - Fin\n\n # 24 -> Impuesto General\n if inv.currency_id != inv.company_id.currency_id:\n campo_24 = inv.company_id.currency_id.round(inv.amount_total * inv.exchange_rate)\n else:\n campo_24 = inv.amount_total\n\n # 00001 - Inicio\n # Para nota de credito, convertimos a negativo los valores monetarios\n if inv.type == 'out_refund':\n campo_13 = campo_13 * -1\n campo_16 = campo_16 * -1\n campo_18 = campo_18 * -1\n campo_19 = campo_19 * -1\n campo_20 = campo_20 * -1\n campo_23 = campo_23 * -1\n campo_24 = campo_24 * -1\n campo_14 = campo_14 * -1\n campo_15 = 0\n # 00002 - Inicio\n campo_17 = 0\n if inv.date_invoice and inv.refund_invoice_id.date_invoice:\n if inv.date_invoice.strftime(\"%m\") != inv.refund_invoice_id.date_invoice.strftime(\"%m\"):\n campo_15 = campo_14\n campo_17 = campo_16\n campo_14 = 0\n campo_16 = 0\n # 00002 - Fin\n else:\n campo_15 = 0\n campo_17 = 0\n # 00001 - Fin\n\n txt_line = \"%s|%s|M%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%.3f|%s|%s|%s\" \\\n \"|%s|||%s|%s|\" % (\n (periodo + \"00\") or '', # Periodo Asiento -> 1\n inv.move_id.name.replace(\"/\", \"\") \\\n if inv.move_id.name else '', # Correlativo de Factura -> 2\n str(inv.move_id.id).zfill(4) or '',\n # Correlativo de todos los asientos no solo facturas -> 3\n inv.date_invoice and inv.date_invoice.strftime(\"%d/%m/%Y\") or \"\", # Fecha Emisión -> 4\n campo_05, # Fecha de Vencimiento -> 5\n inv.document_type_id.code or '', # N° del Tipo de Documento -> 6\n 
str(inv.sunat_serie.name if inv.sunat_serie else 0).zfill(4), # Serie de Documento -> 7\n campo_08, # Numero de Documento -> 8\n '', # Dejan en blanco -> 9\n inv.partner_id.catalog_06_id.code or '',\n # Tipo de Documento -> 10\n inv.partner_id.vat or '', # Numero de Documento -> 11\n inv.partner_id.name or '', # Nombre del Proveedor -> 12\n # 00001 - Inicio\n # rec.total_exonerado or '', # Factura de Exportacion -> 13\n \"{0:.2f}\".format(campo_13 if campo_13 else 0.00), # Factura de Exportacion -> 13\n \"{0:.2f}\".format(campo_14 if campo_14 else 0.00), # Impuesto no incluido -> 14\n \"{0:.2f}\".format(campo_15 if campo_15 else 0.00), # Impuesto -> 15\n \"{0:.2f}\".format(campo_16 if campo_16 else 0.00), # Impuesto -> 16\n \"{0:.2f}\".format(campo_17 if campo_17 else 0.00), # Impuesto -> 17\n \"{0:.2f}\".format(campo_18 if campo_18 else 0.00), # Importe exonerado -> 18\n \"{0:.2f}\".format(campo_19 if campo_19 else 0.00), # Importe inafecto -> 19\n \"{0:.2f}\".format(campo_20 if campo_20 else 0.00), # Impuesto -> 20\n \"{0:.2f}\".format(0.00), # Base Imponible -> 21\n \"{0:.2f}\".format(0.00), # Impuesto -> 22\n \"{0:.2f}\".format(campo_23 if campo_23 else 0.00), # Impuesto -> 23\n \"{0:.2f}\".format(campo_24 if campo_24 else 0.00), # Total -> 24\n inv.currency_id.name if inv.currency_id.name else '', # Tipo de moneda -> 25\n inv.exchange_rate or 1.000, # Tipo de Cambio -> 26\n inv.refund_invoice_date_invoice.strftime(\"%d/%m/%Y\") \\\n if inv.refund_invoice_date_invoice else '01/01/0001',\n # Fecha del Documento Asociado -> 27\n inv.refund_invoice_document_type_id.code or '00', # Tipo del Documento Asociado -> 28\n inv.refund_invoice_sunat_serie or '0000', # Serie del Documento Asociado -> 29\n inv.refund_invoice_sunat_number or '00000000', # Numero del Documento Asociado -> 30\n # 2 campos en blanco -> 31, 32\n \"1\" if inv.state == 'paid' else \"\",\n codigo_34 or '', # -> 34\n # 1 campo en blanco -> 35\n )\n content = content + \"\" + txt_line + \"\\r\\n\"\n if cantidad == '0':\n cantidad = '1'\n if cantidad == '0':\n content = \"\\r\\n\"\n self.write({\n 'state': 'get',\n 'txt_binary': base64.b64encode(content.encode('ISO-8859-1')),\n # 'txt_filename': \"ventas.txt\"\n 'txt_filename': \"LE\" + (\n ruc if ruc else \"00000000000\") + \"\" + periodo + \"00140100001\" + cantidad + \"11.txt\"\n })\n return {\n 'type': 'ir.actions.act_window',\n 'name': 'Reporte PLE',\n 'res_model': 'account.bill.txt',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_id': self.id,\n 'target': 'new'\n }\n", "sub_path": "Funcion_Digital/qa_contabilidad_ple_75uit_peru/wizard/account_bill_txt.py", "file_name": "account_bill_txt.py", "file_ext": "py", "file_size_in_byte": 24951, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.models.TransientModel", "line_number": 17, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 17, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 21, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 21, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 22, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 22, "usage_type": "name"}, {"api_name": "odoo.fields.Binary", "line_number": 23, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 23, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 26, 
"usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 26, "usage_type": "name"}, {"api_name": "odoo.fields.Date.context_today", "line_number": 38, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 38, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 38, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 39, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 39, "usage_type": "name"}, {"api_name": "odoo.fields.Date.context_today", "line_number": 39, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 39, "usage_type": "attribute"}, {"api_name": "odoo.fields.Selection", "line_number": 40, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 40, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 41, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 41, "usage_type": "name"}, {"api_name": "odoo.exceptions.ValidationError", "line_number": 52, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 249, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 435, "usage_type": "call"}, {"api_name": "odoo.api.multi", "line_number": 44, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "535241009", "text": "\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport os\nimport sys\nfrom datetime import datetime\nfrom pathlib import Path\nfrom sklearn.model_selection import train_test_split\n\n\nimg_folder = '../data/output_images/'\n\n'''\nLoads csv only, no images.\n'''\ndef GetCSVs(sample_size):\n\n # Name of folder\n names = [\n 'Australia',\n 'Germany',\n 'Netherlands',\n 'Switzerland',\n 'Amtrak',\n 'BostonMTBA',\n 'DenverRTD',\n 'LosAngelesMR',\n 'NewarkLR',\n 'SeattleLLR',\n ]\n\n # Name of csv\n abbr = [\n 'AUS',\n 'GRM',\n 'NET',\n 'SWZ',\n 'AMT',\n 'BOS',\n 'DEN',\n 'LAA',\n 'NEW',\n 'SEA',\n ]\n locations = dict(zip(names,abbr))\n\n # Collect each csv into one df adding railway name\n frames = []\n for key,value in locations.items():\n try:\n filename = img_folder+key+'/'+value+'.csv'\n tmp = pd.read_csv(filename,header=0)\n tmp['Railway'] = key\n\n # Take sample from each folder \n tmp = tmp.sample(frac=sample_size).reset_index(drop=True)\n frames.append(tmp)\n except Exception as e:\n print(e)\n\n df = pd.concat(frames)\n\n df = df.dropna()\n df['Catenary'] = df['Catenary'].astype(int)\n \n \n '''\n Open known non-catenary lines and add differntial to df\n '''\n\n\n zeros = df.Catenary.value_counts()[0]\n ones = df.Catenary.value_counts()[1]\n\n names = [\n 'Amtrak_non_cat_1',\n 'Amtrak_non_cat_2',\n 'Amtrak_non_cat_3',\n # 'Random'\n ]\n\n abbr = [\n 'ANC',\n 'ANC2',\n 'ANC3',\n # 'RAN'\n ]\n\n locations['Amtrak_non_cat_1'] = 'ANC'\n locations['Amtrak_non_cat_2'] = 'ANC2'\n locations['Amtrak_non_cat_3'] = 'ANC3'\n locations['Random'] = 'RAN'\n\n locations2 = dict(zip(names,abbr))\n\n diff = ones - zeros\n\n if diff > 0:\n frames = []\n for key,value in locations2.items():\n try:\n filename = img_folder+key+'/'+value+'.csv'\n tmp = pd.read_csv(filename,header=0)\n tmp['Railway'] = key\n frames.append(tmp)\n except Exception as e:\n print(e)\n\n try:\n duds = pd.concat(frames)\n duds = duds.dropna()\n duds['Catenary'] = duds['Catenary'].astype(int) \n\n duds = duds.sample(n=diff).reset_index(drop=True)\n df = 
pd.concat([df,duds]).reset_index(drop=True)\n except Exception as e:\n print(e)\n duds = duds.sample(len(duds.index.tolist())).reset_index(drop=True)\n df = pd.concat([df,duds]).reset_index(drop=True)\n\n else:\n df.sort_values(by='Catenary',inplace=True)\n df = df.iloc[abs(diff):]\n df = df.sample(frac=1.0)\n \n return df\n\n\n# In[6]:\n\n\n'''\nGet image paths and labels as lists\n'''\ndef GetPaths(df):\n\n rows = df.index.tolist()\n path = GetABSPath(img_folder)\n img_paths = []\n labels = []\n for row in rows:\n tmp = df.iloc[row]\n img_path = path+'/'+tmp.Railway+'/set_2/'+tmp.Name+'.png'\n img_paths.append(img_path)\n label = int(tmp.Catenary)\n labels.append(label)\n\n print(len(img_paths))\n \n return img_paths,labels\n\n\n# In[7]:\n\n\ndef GetABSPath(folder):\n return os.path.abspath(folder)\n\n\n# In[8]:\ndef DataAugment(image):\n image = tf.keras.preprocessing.image.random_rotation(\n image,\n 30,\n row_axis=1,\n col_axis=2,\n channel_axis=0,\n fill_mode='nearest',\n cval=0.0,\n interpolation_order=1)\n\n return image\n\n\ndef PreprocessImage(img_path):\n img_raw = tf.io.read_file(img_path)\n image = tf.io.decode_png(img_raw, channels=3)\n image = tf.image.resize(image, img_size)\n # print(image.shape)\n image /= 255.0 # normalize to [0,1] range\n\n return image\n\ndef flip(x: tf.Tensor) -> tf.Tensor:\n x = tf.image.random_flip_left_right(x)\n x = tf.image.random_flip_up_down(x)\n\n return x\n\ndef rotate(x: tf.Tensor) -> tf.Tensor:\n return tf.image.rot90(x, tf.random_uniform(shape=[], minval=0, maxval=4, dtype=tf.int32))\n\n\ndef SplitDataSet(img_paths, labels):\n \n # split lists into training/test \n X_train, X_test, Y_train, Y_test = train_test_split(img_paths,labels,test_size = .1, random_state=1)\n\n # split lists into training/validation \n X_train, X_val, Y_train, Y_val = train_test_split(X_train,Y_train,test_size = .2, random_state=1)\n\n print('Number of images in train: ', len(X_train))\n print(\"Distribution for train set: \", np.unique(Y_train, return_counts=True))\n print('\\n')\n\n print('Number of images in validation: ', len(X_val))\n print(\"Distribution for validation set: \", np.unique(Y_val, return_counts=True))\n print('\\n')\n\n print('Number of images in test: ', len(X_test))\n print(\"Distribution for test set: \", np.unique(Y_test, return_counts=True))\n print('\\n')\n\n # -----------------------------------\n # train\n # Read images/labels into tensor data \n train_path_ds = tf.data.Dataset.from_tensor_slices(X_train)\n train_image_ds = train_path_ds.map(PreprocessImage, num_parallel_calls=AUTOTUNE)\n\n augmentations = [flip,rotate]\n for f in augmentations:\n train_image_ds = train_image_ds.map(lambda x: tf.cond(tf.random_uniform([], 0, 1) > 0.75, lambda: f(x), lambda: x), num_parallel_calls=4)\n train_image_ds = train_image_ds.map(lambda x: tf.clip_by_value(x, 0, 1))\n\n train_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(Y_train, tf.int64))\n \n # Combine into dataset \n train_image_label_ds = tf.data.Dataset.zip((train_image_ds, train_label_ds))\n\n\n # -----------------------------------\n # validation\n # Read images/labels into tensor data \n val_path_ds = tf.data.Dataset.from_tensor_slices(X_val)\n val_image_ds = val_path_ds.map(PreprocessImage, num_parallel_calls=AUTOTUNE)\n augmentations = [flip,rotate]\n for f in augmentations:\n val_image_ds = val_image_ds.map(lambda x: tf.cond(tf.random_uniform([], 0, 1) > 0.75, lambda: f(x), lambda: x), num_parallel_calls=4)\n val_image_ds = val_image_ds.map(lambda x: tf.clip_by_value(x, 0, 1))\n 
val_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(Y_val, tf.int64))\n \n # Combine into dataset \n val_image_label_ds = tf.data.Dataset.zip((val_image_ds, val_label_ds))\n \n \n # -----------------------------------\n # test\n test_path_ds = tf.data.Dataset.from_tensor_slices(X_test)\n test_image_ds = test_path_ds.map(PreprocessImage, num_parallel_calls=AUTOTUNE)\n augmentations = [flip,rotate]\n for f in augmentations:\n test_image_ds = test_image_ds.map(lambda x: tf.cond(tf.random_uniform([], 0, 1) > 0.75, lambda: f(x), lambda: x), num_parallel_calls=4)\n test_image_ds = test_image_ds.map(lambda x: tf.clip_by_value(x, 0, 1))\n test_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(Y_test, tf.int64))\n \n test_image_label_ds = tf.data.Dataset.zip((test_image_ds, test_label_ds))\n \n return train_image_label_ds, val_image_label_ds, test_image_label_ds\n\n\n# In[10]:\n\n\n'''\nShuffle/batch/prefetch/Set Range\n'''\ndef ShuffleBatch(ds_dict,buff,BATCH_SIZE = 32):\n \n ds = ds_dict.shuffle(buffer_size = buff)\n ds = ds.repeat()\n ds = ds.batch(BATCH_SIZE)\n\n ds = ds.prefetch(buffer_size=AUTOTUNE)\n # ds\n\n def change_range(image,label):\n return 2*image-1, label\n\n keras_ds = ds.map(change_range)\n \n return keras_ds\n\n\n", "sub_path": "scripts/tf_test.py", "file_name": "tf_test.py", "file_ext": "py", "file_size_in_byte": 7717, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pandas.read_csv", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 106, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 200, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 214, "usage_type": "call"}]} +{"seq_id": "5252449", "text": "import datetime\n\nfrom .. 
import config, util\n\nfrom ..metrics import values\nfrom ..reports import ReportTypes\nfrom ..web import base, encoder\nfrom ..web.request import AccessTypeList\nfrom ..web.errors import APIPermissionException\n\nlog = config.log\n\nclass ReportHandler(base.RequestHandler):\n \"\"\"Handles report requests\n\n To add a new report, declare a subclass of Report,\n and add it to the ReportTypes map in __init__.py\n \"\"\"\n def get_types(self):\n return AccessTypeList\n\n def get(self, report_type):\n report = self._get_report(report_type)\n\n if self.is_true('csv'):\n download_format = 'csv'\n else:\n download_format = self.get_param('download')\n\n if download_format:\n # Stream the response\n def response_handler(environ, start_response): # pylint: disable=unused-argument\n report_writer = report.get_writer(download_format)\n\n write = start_response('200 OK', [\n ('Content-Type', report_writer.get_content_type()),\n ('Content-Disposition', 'attachment; filename=\"{}\"'.format(report_writer.get_filename())),\n ('Connection', 'keep-alive')\n ])\n\n report_writer.execute(write)\n return ''\n\n return response_handler\n else:\n return report.build()\n\n def collect(self, report_type):\n report = self._get_report(report_type)\n if not report.can_collect:\n raise NotImplementedError('Report type {} does not support collection'.format(report_type))\n\n # Invoke input validation for report collection\n report.before_collect()\n\n def write_report(write):\n try:\n for item in report.collect():\n try:\n write(encoder.json_sse_pack({\n 'event': 'progress',\n 'data': item\n }))\n except Exception: # pylint: disable=broad-except\n log.info('SSE upload progress failed to send; continuing')\n\n # Log last collection time\n time_since = datetime.datetime.now() - datetime.datetime(1970, 1, 1)\n values.LAST_REPORT_COLLECTION.labels(report_type).set(time_since.total_seconds())\n except Exception: # pylint: disable=broad-except\n log.exception('Error collecting %s report data', report_type)\n values.REPORT_COLLECTION_ERROR_COUNT.labels(report_type).inc()\n\n return util.build_sse_handler(write_report)\n\n def get_availability(self, report_type):\n \"\"\"Get report availability\"\"\"\n report = self._get_report(report_type)\n if not report.has_availability:\n raise NotImplementedError('Report type {} does not provide availability'.format(report_type))\n\n return report.get_availability()\n\n def _get_report(self, report_type):\n \"\"\"Get report for report_type and validate permissions\"\"\"\n if report_type in ReportTypes:\n report_class = ReportTypes[report_type]\n report = report_class(self.request.params)\n else:\n raise NotImplementedError('Report type {} is not supported'.format(report_type))\n\n if not self.user_is_admin and not report.user_can_generate(self.uid, self.roles):\n raise APIPermissionException('User {} does not have permissions to generate report'.format(self.uid))\n\n return report\n", "sub_path": "api/reports/handler.py", "file_name": "handler.py", "file_ext": "py", "file_size_in_byte": 3560, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "web.base.RequestHandler", "line_number": 13, "usage_type": "attribute"}, {"api_name": "web.base", "line_number": 13, "usage_type": "name"}, {"api_name": "web.request.AccessTypeList", "line_number": 20, "usage_type": "name"}, {"api_name": "web.encoder.json_sse_pack", "line_number": 60, "usage_type": "call"}, {"api_name": "web.encoder", "line_number": 60, "usage_type": "name"}, 
{"api_name": "datetime.datetime.now", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 68, "usage_type": "attribute"}, {"api_name": "metrics.values.LAST_REPORT_COLLECTION.labels", "line_number": 69, "usage_type": "call"}, {"api_name": "metrics.values.LAST_REPORT_COLLECTION", "line_number": 69, "usage_type": "attribute"}, {"api_name": "metrics.values", "line_number": 69, "usage_type": "name"}, {"api_name": "metrics.values.REPORT_COLLECTION_ERROR_COUNT.labels", "line_number": 72, "usage_type": "call"}, {"api_name": "metrics.values.REPORT_COLLECTION_ERROR_COUNT", "line_number": 72, "usage_type": "attribute"}, {"api_name": "metrics.values", "line_number": 72, "usage_type": "name"}, {"api_name": "reports.ReportTypes", "line_number": 86, "usage_type": "name"}, {"api_name": "reports.ReportTypes", "line_number": 87, "usage_type": "name"}, {"api_name": "web.errors.APIPermissionException", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "294093757", "text": "import matplotlib.pyplot as plt\r\n\r\n\"\"\"\r\nx = ('akash', 'udit', 'anand', 'sanjay', 'gautam')\r\ny = [45, 34, 25, 43, 50]\r\nplt.bar(x, y)\r\nplt.show()\r\n\"\"\"\r\n\r\nvalue = [34,22,15,21,16]\r\nplt.pie(value)\r\nplt.show()\r\n\r\n", "sub_path": "PythonLibraties/Library1.py", "file_name": "Library1.py", "file_ext": "py", "file_size_in_byte": 208, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.pyplot.pie", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "401511727", "text": "# imageは全てtextureに合わせた形式で処理\n\nimport asyncio\nimport cv2\nimport numpy as np\nfrom kivy.app import App\nfrom kivy.clock import mainthread\nfrom kivy.graphics.texture import Texture\nfrom kivy.properties import ObjectProperty, StringProperty, ListProperty, AliasProperty, NumericProperty, BooleanProperty\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.popup import Popup\nfrom kivy.uix.progressbar import ProgressBar\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nimport os\nimport time\nimport threading\n\n# from logics.clip import clip_image\nfrom .logics.operation import cross, diff\nfrom .logics.matching import match_image, get_homography, match_points, detect_keypoint\nfrom .logics.warp import warp_image_liner, replace_image, warp, warp_only\nfrom .utils.file import get_save_path, get_file_ext\nfrom .utils.format import cv2tex_format, tex2cv_format\nfrom .utils.kivyevent import sleep, popup_task, forget\nfrom .utils.mixin import SelectMixin, ImageSelectMixin\nfrom .widgets.loaddialog import LoadDialog\nfrom .widgets.image import RectDrawImage\n\nIMAGE_EXT = [\"*.jpg\", \"*.png\"]\nVIDEO_EXT = [\"*.mkv\", \"*.ogv\", \"*.avi\", \"*.mov\", \"*.flv\"]\n\n\nclass SelectReferenceScreen(ImageSelectMixin, Screen):\n points = ListProperty([])\n next_state = StringProperty(\"\")\n\n def add_pixels(self, widget, *uv):\n self.points.append(uv)\n\n def remove_pixels(self):\n self.points.pop(-1)\n\n def go_next(self):\n self.manager.current = self.next_state\n\n def set_points_auto(self):\n h, w, *_ = self.cv_img.shape\n self.points = [\n [0, 0], [h, 0], [h, w], [0, w]\n ]\n\n def show_load(self, load=None, filters=IMAGE_EXT):\n super().show_load(load, filters)\n if not hasattr(self, 
\"next_button\"):\n self.next_button = self.ids.next_button\n self.pass_button = self.ids.pass_button\n self.next_button.disabled = True\n self.pass_button.disabled = False\n\n def activate_next(self):\n self.next_button.disabled = False\n\n\nclass SelectDestinationScreen(ImageSelectMixin, Screen):\n texture = ObjectProperty(None)\n next_state = StringProperty(\"\")\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.texture = Texture.create(size=(1, 1))\n\n def show_load(self, load=None, filters=IMAGE_EXT):\n super().show_load(load, filters)\n if not hasattr(self, \"next_button\"):\n self.next_button = self.ids.next_button\n self.next_button.disabled = False\n\n def go_next(self):\n self.manager.current = self.next_state\n\n\nclass SelectTargetScreen(SelectMixin, Screen):\n source = StringProperty(\"\")\n video_source = StringProperty(\"\")\n image_source = StringProperty(\"\")\n\n def set_source(self, filename):\n self.source = filename[0]\n self.dismiss_popup()\n ext = \"*\" + get_file_ext(self.source).lower()\n\n if ext in VIDEO_EXT:\n self.video_source = f\"{filename[0]}\"\n elif ext in IMAGE_EXT:\n self.image_source = f\"{filename[0]}\"\n\n\n def show_load(self, load=None, filters=VIDEO_EXT+IMAGE_EXT):\n if load is None:\n load = self.set_source\n super().show_load(load, filters)\n\n if not hasattr(self, \"next_button\"):\n self.next_button = self.ids.next_button\n self.next_button.disabled = False\n\n def go_next(self):\n self.manager.current = self.next_state\n\nclass MatchMoveWidget(Widget):\n min_match_count = NumericProperty(10)\n flann_index_kdtree = NumericProperty(0)\n video_width = NumericProperty(1024)\n is_optical = BooleanProperty(False)\n\n fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n\n algorithms = {\n \"AKAZE\" : cv2.AKAZE_create(),\n \"SIFT\" : cv2.xfeatures2d.SIFT_create()\n }\n\n gamma = 1/1.8\n gamma_cvt = np.uint8(255 * (np.linspace(0, 1, 256) ** gamma))\n def correct(self, img):\n return cv2.LUT(img, self.gamma_cvt)\n\n def save_to(self, to):\n return get_save_path(\"result\", \"matchmove\", \"eval\", to)\n\n def set_reference(self, reference, points):\n self.reference = reference\n self.points = np.array(points)\n\n def set_destination(self, dest):\n async def task():\n # h = np.sqrt(np.sum((self.points[1] - self.points[0])**2)).astype(np.int16)\n # w = np.sqrt(np.sum((self.points[2] - self.points[1])**2)).astype(np.int16)\n h, w, *_ = dest.shape\n self.destination = cv2.resize(self.correct(tex2cv_format(dest)), (w, h))\n self.reference = await popup_task(\n \"Calculating...\", \n warp,\n self.reference, \n self.points[0],\n self.points[1],\n self.points[2],\n self.points[3],\n np.array([0, 0]),\n np.array([h, 0]),\n np.array([h, w]),\n np.array([0, w]),\n h, w)\n self.reference = tex2cv_format(self.reference)\n cv2.imwrite(self.save_to(\"destination.png\"), self.destination)\n cv2.imwrite(self.save_to(\"reference.png\"), self.reference)\n self.reference = self.correct(self.reference)\n await sleep(0.333)\n forget(task())\n\n def set_target(self, source, key):\n self.source = source\n ext = \"*\" + get_file_ext(self.source)\n if ext in VIDEO_EXT:\n self.set_video_target(key)\n else:\n self.set_image_target(key)\n\n def set_image_target(self, key):\n async def task():\n try:\n folder, file = os.path.split(self.source)\n import re\n *_, typ, _ = re.split(r\"[\\._]\", file)\n corners = np.load(os.path.join(folder, f\"points_{typ}.npy\"))\n except Exception as e:\n corners = None\n print(\"no corners file:\", e)\n\n await popup_task(\n 
\"Calculating...\",\n self.execute_image,\n key)\n forget(task())\n\n def set_video_target(self, key):\n async def task():\n await popup_task(\n \"Calculating...\",\n self.execute_video,\n key)\n forget(task())\n\n def execute_image(self, algorithm, corners=None, typ=\"\"):\n frame = cv2.imread(self.source)\n size_h, size_w, *_ = frame.shape\n\n frame = cv2.resize(frame, (size_w, size_h))\n frame = self.correct(frame)\n\n ref_kp, ref_des = detect_keypoint(self.reference, self.algorithms[algorithm])\n tar_kp, tar_des = detect_keypoint(frame, self.algorithms[algorithm])\n \n src_pts, dst_pts, good = match_points(\n ref_kp, ref_des, \n tar_kp, tar_des,\n self.min_match_count,\n self.flann_index_kdtree)\n\n cv2.imwrite(\n self.save_to(f\"keypoints_frame_image_{algorithm}_{typ}.png\"), \n cv2.drawKeypoints(frame, tar_kp, None, flags=4))\n cv2.imwrite(\n self.save_to(f\"matches_image_{algorithm}_{len(good)}_{typ}.png\"),\n cv2.drawMatchesKnn(\n frame, tar_kp, \n self.reference, ref_kp, \n good, None,\n matchColor=(0, 255, 0), matchesMask=None,\n singlePointColor=(255, 0, 0), flags=0))\n \n if src_pts is not None or dst_pts is not None:\n # frameからreferenceの変換を取得する\n H = get_homography(src_pts, dst_pts)\n frame = replace_image(self.destination, frame, H).astype(np.uint8)\n cv2.imwrite(\n self.save_to(f\"result_{algorithm}_{typ}.png\"), \n frame)\n\n def execute_video(self, algorithm, max_speed=1):\n cap = cv2.VideoCapture(self.source)\n if not cap:\n return\n\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = cap.get(cv2.CAP_PROP_FPS)\n\n size_w = self.video_width \n size_h = self.video_width * h // w\n\n writer = cv2.VideoWriter(\n self.save_to(f\"result_{algorithm}.mp4\"), \n self.fmt, fps, (size_w, size_h))\n\n ref_kp, ref_des = detect_keypoint(self.reference, self.algorithms[algorithm])\n cv2.imwrite(\n self.save_to(f\"keypoints_reference_{algorithm}.png\"), \n cv2.drawKeypoints(self.reference, ref_kp, None, flags=4))\n\n i = 0\n minh = 0\n minw = 0\n maxh = size_h\n maxw = size_w\n start = time.time()\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n t = time.time()\n print(\"\\nend process:\", (t - start) / max(i, 1))\n break\n \n frame = cv2.resize(frame, (size_w, size_h))\n frame = self.correct(frame)\n\n print(f\"\\rdesctipt frame: {i}\\t\\t\\t\\t\", end=\"\")\n tar_kp, tar_des = detect_keypoint(frame[minh:maxh, minw:maxw], self.algorithms[algorithm])\n\n h_c, w_c, *_ = frame[minh:maxh, minw:maxw].shape\n print(f\"\\rmatch frame: {i}\\t\\t\\t\\t\", end=\"\")\n src_pts, dst_pts, good = match_points(\n ref_kp, ref_des, \n tar_kp, tar_des,\n self.min_match_count,\n self.flann_index_kdtree)\n\n if i == 0:\n print(f\"\\save frame: {i}\\t\\t\\t\\t\", end=\"\")\n cv2.imwrite(\n self.save_to(f\"keypoints_frame_{algorithm}.png\"), \n cv2.drawKeypoints(frame, tar_kp, None, flags=4))\n cv2.imwrite(\n self.save_to(f\"matches_{algorithm}.png\"),\n cv2.drawMatchesKnn(\n frame, tar_kp, \n self.reference, ref_kp, \n good, None,\n matchColor=(0, 255, 0), matchesMask=None,\n singlePointColor=(255, 0, 0), flags=0))\n start = time.time()\n\n if src_pts is not None or dst_pts is not None:\n # frameからreferenceの変換を取得する\n H = get_homography(src_pts, dst_pts)\n if self.is_optical:\n replaced = warp_only(self.destination, frame, H, minh, minw)\n mask = np.sum(replaced > 0, axis=2, dtype=bool)\n\n print(f\"\\rreplace frame: {i}\\t\\t\\t\\t\", end=\"\")\n frame = np.where(mask[:,:,None], replaced, frame).astype(np.uint8)\n \n mask_id = 
np.array(np.where(mask))\n                    # keep the search window inside the frame bounds\n                    minh = max(np.min(mask_id[0])-max_speed, 0)\n                    minw = max(np.min(mask_id[1])-max_speed, 0)\n                    maxh = min(np.max(mask_id[0])+max_speed, size_h)\n                    maxw = min(np.max(mask_id[1])+max_speed, size_w)\n                else:\n                    frame = replace_image(self.destination, frame, H).astype(np.uint8)\n\n            writer.write(frame)\n            i += 1\n\n        writer.release()\n        cap.release()\n\n\nclass MatchMoveApp(App):\n    def __init__(self):\n        super().__init__()\n        self.title = \"Match Move\"\n\n    def build(self):\n        return MatchMoveWidget()\n\n\nif __name__ == '__main__':\n    MatchMoveApp().run()\n", "sub_path": "src/matchmove.py", "file_name": "matchmove.py", "file_ext": "py", "file_size_in_byte": 11388, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "utils.mixin.ImageSelectMixin", "line_number": 33, "usage_type": "name"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 33, "usage_type": "name"}, {"api_name": "kivy.properties.ListProperty", "line_number": 34, "usage_type": "call"}, {"api_name": "kivy.properties.StringProperty", "line_number": 35, "usage_type": "call"}, {"api_name": "utils.mixin.ImageSelectMixin", "line_number": 64, "usage_type": "name"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 64, "usage_type": "name"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 65, "usage_type": "call"}, {"api_name": "kivy.properties.StringProperty", "line_number": 66, "usage_type": "call"}, {"api_name": "kivy.graphics.texture.Texture.create", "line_number": 70, "usage_type": "call"}, {"api_name": "kivy.graphics.texture.Texture", "line_number": 70, "usage_type": "name"}, {"api_name": "utils.mixin.SelectMixin", "line_number": 82, "usage_type": "name"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 82, "usage_type": "name"}, {"api_name": "kivy.properties.StringProperty", "line_number": 83, "usage_type": "call"}, {"api_name": "kivy.properties.StringProperty", "line_number": 84, "usage_type": "call"}, {"api_name": "kivy.properties.StringProperty", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.file.get_file_ext", "line_number": 90, "usage_type": "call"}, {"api_name": "kivy.uix.widget.Widget", "line_number": 110, "usage_type": "name"}, {"api_name": "kivy.properties.NumericProperty", "line_number": 111, "usage_type": "call"}, {"api_name": "kivy.properties.NumericProperty", "line_number": 112, "usage_type": "call"}, {"api_name": "kivy.properties.NumericProperty", "line_number": 113, "usage_type": "call"}, {"api_name": "kivy.properties.BooleanProperty", "line_number": 114, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.AKAZE_create", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d.SIFT_create", "line_number": 120, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d", "line_number": 120, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 124, "usage_type": "call"}, {"api_name": "cv2.LUT", "line_number": 126, "usage_type": "call"}, {"api_name": "utils.file.get_save_path", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 133, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 140, "usage_type": "call"}, {"api_name": "utils.format.tex2cv_format", "line_number": 140, "usage_type": "call"}, {"api_name": "utils.kivyevent.popup_task", "line_number": 
141, "usage_type": "call"}, {"api_name": "logics.warp.warp", "line_number": 143, "usage_type": "argument"}, {"api_name": "numpy.array", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "utils.format.tex2cv_format", "line_number": 154, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 156, "usage_type": "call"}, {"api_name": "utils.kivyevent.sleep", "line_number": 158, "usage_type": "call"}, {"api_name": "utils.kivyevent.forget", "line_number": 159, "usage_type": "call"}, {"api_name": "utils.file.get_file_ext", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "re.split", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "utils.kivyevent.popup_task", "line_number": 180, "usage_type": "call"}, {"api_name": "utils.kivyevent.forget", "line_number": 184, "usage_type": "call"}, {"api_name": "utils.kivyevent.popup_task", "line_number": 188, "usage_type": "call"}, {"api_name": "utils.kivyevent.forget", "line_number": 192, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 195, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 198, "usage_type": "call"}, {"api_name": "logics.matching.detect_keypoint", "line_number": 201, "usage_type": "call"}, {"api_name": "logics.matching.detect_keypoint", "line_number": 202, "usage_type": "call"}, {"api_name": "logics.matching.match_points", "line_number": 204, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 210, "usage_type": "call"}, {"api_name": "cv2.drawKeypoints", "line_number": 212, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 213, "usage_type": "call"}, {"api_name": "cv2.drawMatchesKnn", "line_number": 215, "usage_type": "call"}, {"api_name": "logics.matching.get_homography", "line_number": 224, "usage_type": "call"}, {"api_name": "logics.warp.replace_image", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 225, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 226, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 231, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 235, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 236, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 237, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter", "line_number": 242, "usage_type": "call"}, {"api_name": "logics.matching.detect_keypoint", "line_number": 246, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 247, "usage_type": "call"}, {"api_name": "cv2.drawKeypoints", "line_number": 249, "usage_type": "call"}, {"api_name": "time.time", "line_number": 256, "usage_type": "call"}, {"api_name": "time.time", "line_number": 260, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 264, "usage_type": "call"}, {"api_name": 
"logics.matching.detect_keypoint", "line_number": 268, "usage_type": "call"}, {"api_name": "logics.matching.match_points", "line_number": 272, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 280, "usage_type": "call"}, {"api_name": "cv2.drawKeypoints", "line_number": 282, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 283, "usage_type": "call"}, {"api_name": "cv2.drawMatchesKnn", "line_number": 285, "usage_type": "call"}, {"api_name": "time.time", "line_number": 291, "usage_type": "call"}, {"api_name": "logics.matching.get_homography", "line_number": 295, "usage_type": "call"}, {"api_name": "logics.warp.warp_only", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 301, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 307, "usage_type": "call"}, {"api_name": "logics.warp.replace_image", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 309, "usage_type": "attribute"}, {"api_name": "kivy.app.App", "line_number": 318, "usage_type": "name"}]} +{"seq_id": "440243282", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport xml.etree.cElementTree as ET\nimport pprint\nimport re\nimport codecs\nimport json\n\nlower = re.compile(r'^([a-z]|_)*$')\nlower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')\nproblemchars = re.compile(r'[=\\+/&<>;\\'\"\\?%#$@\\,\\. 
\t\r\n]')\n\ndef organise_way_name(way_name):\n    prob_names={'st':'street','st.':'street','ave':'avenue','ave.':'avenue','rd':'road','rd.':'road','center':'centre','':'N/A','None':'N/A'}\n    if str(way_name) in prob_names.keys():\n        way_name=prob_names[str(way_name)]\n    return way_name\n\ndef organise_address(addr):\n    return addr\n\ndef shape_element(element):\n    dictionary = {}\n    dictionary['created']={}\n    dictionary['member']={}\n    if element.tag == \"node\" or element.tag == \"way\" or element.tag=='relation':\n\n        #organise second level key-value pairs\n        for child in element:\n            if child.tag=='tag':\n                if 'description' not in dictionary.keys():\n                    dictionary['description']={}\n                child_key=child.attrib['k']\n                if problemchars.search(child_key):\n                    pass\n                elif child_key=='name':\n                    way_name=child.attrib['v'].lower().split('(')[0].split(' ')\n                    way_last_name=organise_way_name(way_name[-1])\n                    # swap in the normalised final token\n                    way_name[-1]=way_last_name\n                    dictionary['description']['name']=' '.join(way_name)\n                elif child_key.split(':')[0]=='addr' and len(child_key.split(':'))>1:\n                    if 'address_detail' not in dictionary['description'].keys():\n                        dictionary['description']['address_detail']={}\n                    dictionary['description']['address_detail'][str(child_key.split(':')[1])]=child.attrib['v']\n                else:\n                    dictionary['description'][child_key]=child.attrib['v']\n            elif child.tag=='nd':\n                if 'member' not in dictionary.keys():\n                    dictionary['member']={}\n                dictionary['member']['type']='node' \n                if 'ref' not in dictionary['member'].keys():\n                    dictionary['member']['ref']=[]\n                dictionary['member']['ref'].append(int(child.attrib['ref'])) \n            elif child.tag=='member':\n                if 'member' not in dictionary.keys():\n                    dictionary['member']={}\n                dictionary['member']['type']=child.attrib['type']\n                if 'role' in child.attrib.keys():\n                    dictionary['member']['role']=child.attrib['role']\n                if 'ref' not in dictionary['member'].keys():\n                    dictionary['member']['ref']=[]\n                dictionary['member']['ref'].append(int(child.attrib['ref']))\n        # organise first level key-value pairs\n        for key in element.attrib.keys():\n            \n            if key=='timestamp' or key=='user':\n                dictionary['created'][key]=element.attrib[key]\n            elif key=='changeset' or key=='uid':\n                dictionary['created'][key]=int(element.attrib[key])\n            elif key=='version':\n                dictionary['created'][key]=float(element.attrib[key])\n            elif key in ['lon','lat']:\n                dictionary['pos']=[float(element.attrib['lat']),float(element.attrib['lon'])]\n            else:\n                dictionary[key]=element.attrib[key]\n        dictionary['type']=element.tag\n        return dictionary\n    else:\n        return None\n\n\ndef process_map(file_in, pretty = False):\n    file_out = \"{0}.json\".format(file_in)\n    with codecs.open(file_out, \"w\") as fo:\n        for _, element in ET.iterparse(file_in):\n            el = shape_element(element)\n            if el:\n                if pretty:\n                    fo.write(json.dumps(el, indent=2)+\"\\n\")\n                    # clear the element to save memory\n                    element.clear()\n                else:\n                    fo.write(json.dumps(el) + \"\\n\")\n                    element.clear()\n\n\n\nif __name__ == \"__main__\":\n    process_map('london.osm')\n", "sub_path": "produce_json.py", "file_name": "produce_json.py", "file_ext": "py", "file_size_in_byte": 4103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "re.compile", "line_number": 9, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 10, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 11, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 84, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree.iterparse", 
"line_number": 85, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree", "line_number": 85, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 89, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "160029827", "text": "\r\nimport cv2\r\n# staring camera\r\ncap=cv2.VideoCapture(0)\r\nwhile cap.isOpened() :\r\n status,frame=cap.read()\r\n # converting image frame into gray scale \r\n grayimg=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) \r\n print(frame.shape)\r\n # text writing \r\n font = cv2.FONT_HERSHEY_SIMPLEX # this font type \r\n cv2.putText(frame,'MUKUL',(10,50), font, 2,(0,2,55),2,cv2.LINE_AA)\r\n cv2.imshow('live',frame)\r\n #cv2.imshow('livegray',grayimg)\r\n if cv2.waitKey(10) & 0xff == ord('q') :\r\n break\r\n#cv2.destroyWindow('live')\r\ncv2.destroyAllWindows() # this will destroy all windows \r\n# to close camera\r\ncap.release()\r\n", "sub_path": "Face_detection2.py", "file_name": "Face_detection2.py", "file_ext": "py", "file_size_in_byte": 657, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "cv2.VideoCapture", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "444870509", "text": "from flask import Blueprint, request, url_for, render_template, make_response, jsonify\nfrom app.auth.helper import response, response_auth, token_required\nfrom app.workspace.helper import workspace_access_required\nfrom app.project.helper import project_access_required, response_with_id\nfrom app.models.bot import Bot\nfrom app import logger\nimport os\nimport json\nfrom app.marketplace.helper import response_with_obj\n\nmarketplace = Blueprint('marketplace', __name__)\n\n@marketplace.route('/marketplace/catalog', methods=['GET'])\n@token_required\n@project_access_required\ndef get(current_user, workspaceId, projectId):\n \"\"\"\n Get list of bots available in the catalog\n \"\"\"\n\n filterObj = request.args.get(\"filter\")\n bots = Bot.get_catalog(filterObj)\n payload = []\n\n if bots:\n for bot in bots:\n payload.append({\n 'id': bot._id,\n 'name': bot.name,\n 'description': bot.description,\n 'price' : float(bot.price),\n 'tags' : bot.tags,\n 'marketplaceCardMediaUrl' : bot.marketplaceCardMediaUrl\n })\n print(payload)\n \n return response_with_obj(\"success\", \"Successfully retrieved catalog.\", payload, 200)\n\n\n@marketplace.route('/marketplace/tags', methods=['GET'])\n@token_required\n@project_access_required\ndef get_tags(current_user, workspaceId, projectId):\n \"\"\"\n Get list of all tags\n \"\"\"\n\n filterObj = request.args.get(\"filter\")\n tags = Bot.get_tags(filterObj)\n payload = []\n \n return make_response(jsonify({\n 'status': \"success\",\n 'message': \"Got the tags\",\n 'tags': tags\n })), 200\n\n\n@marketplace.route('/marketplace/bot', methods=['GET'])\n@token_required\n@project_access_required\ndef get_bot(current_user, workspaceId, 
projectId):\n    \"\"\"\n    Get a single bot by its id\n    \"\"\"\n\n    botId = request.args.get(\"id\")\n    botObj = Bot.get_by_id_no_template(botId)\n    payload = []\n    \n    return make_response(jsonify({\n        'status': \"success\",\n        'message': \"Retrieved Bot Object\",\n        'bot': botObj\n    })), 200\n\n\n@marketplace.route('/marketplace/search', methods=['POST'])\n@token_required\n@project_access_required\ndef search(current_user, workspaceId, projectId):\n    \"\"\"\n    Search Marketplace\n    \"\"\"\n    query = request.json.get(\"query\", None)\n    filter_obj = request.json.get(\"filter\", None)\n    pageNum = int(request.args.get('pageNum', 1))\n    itemsPerPage = int(request.args.get('itemsPerPage', 10))\n\n    print(\"Query and Filter:\", query, filter_obj)\n    \n    bots_list = Bot.search_bots(query, filter_obj, pageNum, itemsPerPage, projectId)\n    # totalItems = Bot.get_total(projectId, query=query, filter_obj=filter_obj)\n    payload = []\n    print(len(bots_list))\n\n    for bot in bots_list:\n        payload.append({\n            'id': bot._id,\n            'name': bot.name,\n            'description': bot.description,\n            'price' : float(bot.price),\n            'tags' : bot.tags,\n            'marketplaceCardMediaUrl' : bot.marketplaceCardMediaUrl\n        })\n    \n    return response_with_obj(\"success\", \"Successfully retrieved catalog.\", payload, 200)", "sub_path": "app/marketplace/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 3169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.Blueprint", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "app.models.bot.Bot.get_catalog", "line_number": 22, "usage_type": "call"}, {"api_name": "app.models.bot.Bot", "line_number": 22, "usage_type": "name"}, {"api_name": "app.marketplace.helper.response_with_obj", "line_number": 37, "usage_type": "call"}, {"api_name": "app.auth.helper.token_required", "line_number": 14, "usage_type": "name"}, {"api_name": "app.project.helper.project_access_required", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "app.models.bot.Bot.get_tags", "line_number": 49, "usage_type": "call"}, {"api_name": "app.models.bot.Bot", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 52, "usage_type": "call"}, {"api_name": "app.auth.helper.token_required", "line_number": 41, "usage_type": "name"}, {"api_name": "app.project.helper.project_access_required", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "app.models.bot.Bot.get_by_id_no_template", "line_number": 68, "usage_type": "call"}, {"api_name": "app.models.bot.Bot", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 71, "usage_type": "call"}, {"api_name": 
"app.auth.helper.token_required", "line_number": 60, "usage_type": "name"}, {"api_name": "app.project.helper.project_access_required", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "app.models.bot.Bot.search_bots", "line_number": 92, "usage_type": "call"}, {"api_name": "app.models.bot.Bot", "line_number": 92, "usage_type": "name"}, {"api_name": "app.marketplace.helper.response_with_obj", "line_number": 107, "usage_type": "call"}, {"api_name": "app.auth.helper.token_required", "line_number": 79, "usage_type": "name"}, {"api_name": "app.project.helper.project_access_required", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "590892157", "text": "\nimport json\n\nfrom requests_debugger import requests\n#import requests\nfrom noms.client.dict_parse import search_parse\nfrom noms.client.dict_parse import food_parse\nfrom noms.client.searchresults import SearchResults\nfrom noms.objects.nutrient_dict import *\nimport pprint\n\nclass Client:\n \"\"\"\n The Client class is used to interface with the USDA Standard Reference Database\n API. It must be initialized with an API key.\n \"\"\"\n\n url = 'https://api.nal.usda.gov/usda/ndb'\n url = 'https://api.nal.usda.gov/fdc/v1'\n\n\n\n def __init__(self, key):\n \"\"\"\n A Client instance must be initialized with a key from\n data.gov. 
This is free to obtain, and you can request one\n here: https://api.data.gov/signup/\n \"\"\"\n self.key = key.strip()\n\n\n def call_post(self, params, url_suffix):\n \"\"\" target_url could be:\n https://api.nal.usda.gov/usda/ndb/V2/reports\n https://api.nal.usda.gov/usda/ndb/search\n depending on which service of the api is being used\n \"\"\"\n target_url = self.url + url_suffix\n params[\"api_key\"] = self.key\n response = json.loads(requests.post(url=target_url, params=params).text)\n if 'error' in response.keys() and response['status'] is not 200:\n raise Exception(response)\n return response\n\n def call(self, params, url_suffix):\n \"\"\" target_url could be:\n https://api.nal.usda.gov/usda/ndb/V2/reports\n https://api.nal.usda.gov/usda/ndb/search\n depending on which service of the api is being used\n \"\"\"\n target_url = self.url + url_suffix\n params[\"api_key\"] = self.key\n response = json.loads(requests.get(url=target_url, params=params).text)\n\n # seems like normal API operation\n if type(response) == list:\n return {'foods': response}\n\n # if nothing found there is None type\n if response is None:\n return {'foods': []}\n\n # error handling: return type dict with error details\n if 'error' in response.keys() and response['status'] != 200:\n raise Exception(response)\n\n def search_query(self, query, dataType=None):\n params = dict(\n query=query\n )\n #result = search_parse(self.call(params,'/foods/search'))\n #pprint.pprint(result['items'])\n return SearchResults(search_parse(self.call(params, '/foods/search')))\n\n def food_query(self, ids):\n # allow for either a single id (ndbno) query, or a list of queries\n if type(ids) == list:\n if len(ids) > 25:\n raise Exception(\"Too many Food ID arguments. API limits it to 25.\")\n params = dict(fdcIds=ids)\n # params.update(dict(type='f', format='json'))\n return_obj = self.call(params, '/foods')\n offset = 0\n return return_obj\n if 'foods' not in return_obj:\n raise Exception(\"No 'foods' index.\\nSee the following error: {}\".format(return_obj))\n return None\n for i in range(0, len(return_obj[\"foods\"])):\n if 'error' in return_obj[\"foods\"][i-offset].keys():\n del return_obj[\"foods\"][i-offset]\n offset += 1\n return return_obj\n\n def get_foods(self, id_value_dict):\n # If more than 25 words are being queried, split it up\n if len(id_value_dict.keys()) > 25:\n print(\"Must call the database {} times, this may take a couple moments. 
Status: {leng}/{leng}\".format(len(id_value_dict.keys())//25+1,leng=len(id_value_dict.keys())))\n dict_copy = id_value_dict.copy()\n food_obj = []\n while len(dict_copy.keys()) > 25:\n current_dict = {}\n items = islice(dict_copy.items(), 25)\n current_dict.update(items)\n call = self.food_query(current_dict.keys())\n food_obj += food_parse(call, nutrient_dict, list(current_dict.values()))\n for key in current_dict.keys():\n del dict_copy[key]\n print(\"Status: {}/{}\".format(len(dict_copy.keys()), len(id_value_dict.keys())))\n call = self.food_query(dict_copy.keys())\n food_obj += food_parse(call, nutrient_dict, list(dict_copy.values()))\n print(\"Complete!\")\n else:\n food_obj = self.food_query(id_value_dict.keys())\n food_obj = food_parse(food_obj, nutrient_dict, list(id_value_dict.values()))\n return food_obj\n", "sub_path": "noms/client/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 4505, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "json.loads", "line_number": 40, "usage_type": "call"}, {"api_name": "requests_debugger.requests.post", "line_number": 40, "usage_type": "call"}, {"api_name": "requests_debugger.requests", "line_number": 40, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 53, "usage_type": "call"}, {"api_name": "requests_debugger.requests.get", "line_number": 53, "usage_type": "call"}, {"api_name": "requests_debugger.requests", "line_number": 53, "usage_type": "name"}, {"api_name": "noms.client.searchresults.SearchResults", "line_number": 73, "usage_type": "call"}, {"api_name": "noms.client.dict_parse.search_parse", "line_number": 73, "usage_type": "call"}, {"api_name": "noms.client.dict_parse.food_parse", "line_number": 105, "usage_type": "call"}, {"api_name": "noms.client.dict_parse.food_parse", "line_number": 110, "usage_type": "call"}, {"api_name": "noms.client.dict_parse.food_parse", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "303389879", "text": "import skimage\nimport selectivesearch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport re\n\nimg_width = 120\nimg_height = 120\nlabels = [\n 'jpg_aeroplane'\n 'jpg_bicycle',\n 'jpg_bird',\n 'jpg_boat',\n 'jpg_bottle',\n 'jpg_bus',\n 'jpg_car',\n 'jpg_cat',\n 'jpg_chair',\n 'jpg_cow',\n 'jpg_diningtable',\n 'jpg_dog',\n 'jpg_horse',\n 'jpg_motorbike',\n 'jpg_person',\n 'jpg_pottedplant',\n 'jpg_sheep',\n 'jpg_sofa',\n 'jpg_train',\n 'jpg_tvmonitor',\n]\n\n\ndef get_onehots():\n length = len(labels)\n ret = []\n for i in range(length, -1, -1):\n ret.append([0] * (length - i) + [1] + [0] * i)\n return np.array(ret)\n\n\ndef get_train_data(img_name):\n plt.ion()\n # load some image from our data set\n img = skimage.io.imread('NC_data/train/{}.jpg'.format(img_name))\n # build up sequences to crop each region by, and\n f = open('NC_data/train/{}.txt'.format(img_name))\n crop_dict = {}\n for line in f.readlines():\n if not re.match(r'^.*,1\\s0', line):\n parts = line.split(',')\n crop_dict[parts[0].split('.')[1]] = _build_seq(parts[1])\n region = _get_subimage(np.reshape(img, [400*400, 3]), list(crop_dict.values())[0])\n plt.imshow(skimage.transform.resize(region, (img_width, img_height)))\n plt.show(block=True)\n\n\ndef get_proposed_regions(img_name):\n ret = np.array()\n # load some image from our dataset\n img = skimage.io.imread('NC_data/test/{}'.format(img_name))\n # perform selective search\n img_lbl, regions = selectivesearch.selective_search(\n img, scale=400, sigma=0.8, 
min_size=400)\n    candidates = set([r['rect'] for r in regions])\n    for x, y, w, h in candidates:\n        # rects come back as (x, y, w, h); image rows index by y, columns by x\n        ret.append(skimage.transform.resize(img[y:y+h, x:x+w], (img_width, img_height)))\n    return np.array(ret)\n\n\ndef _build_seq(s):\n    vals = [eval(x) for x in s.split(' ') if x.isdigit()]\n    ret = []\n    for islice in range(0, len(vals)-1, 2):\n        ret.append((vals[islice], vals[islice] + vals[islice+1]))\n    return ret\n\n\ndef _get_subimage(full_image, slices):\n    ret = []\n    for sl in slices:\n        chunk = full_image[sl[0]:sl[1]+1]\n        ret.extend(chunk)\n    return np.array(ret)\n", "sub_path": "Documents/Neural/Python Code/data_helpers.py", "file_name": "data_helpers.py", "file_ext": "py", "file_size_in_byte": 2193, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 44, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 44, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "skimage.transform.resize", "line_number": 53, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 53, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 60, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 60, "usage_type": "attribute"}, {"api_name": "selectivesearch.selective_search", "line_number": 62, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 66, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}]}
+{"seq_id": "101890212", "text": "import logging\nimport os\n\nimport pandas as pd\nimport pandas_datareader as pdr\nfrom dotenv import find_dotenv, load_dotenv\nfrom fredapi import Fred\nimport db_helper\n\ndef main():\n    fred_api_key = os.environ.get('FRED_API_KEY')\n    fred = Fred(api_key=fred_api_key)\n\n    # keep the module name distinct from the local instance to avoid shadowing\n    db = db_helper.DB_Helper(os.environ.get('DB_PATH'))\n\n    ticker = 'CYS'\n    series_ids = ['SP500', 'DGS10', 'DGS5', 'USD3MTD156N', 'USD1WKD156N', 'FF']\n    \n    # Ticker download\n    if db.have_data_for_series(ticker):\n        logging.info('have data for {0} - not downloading'.format(ticker))\n    else:\n        ds = pdr.data.DataReader(ticker, 'yahoo')\n        db.add_data_series_and_data_yahoo(ds, ticker)\n        logging.info('Added data for %s', ticker)\n\n    # Series download\n    for id in series_ids:\n        if db.have_data_for_series(id):\n            logging.info('have data for {0} - not downloading'.format(id))\n        else:\n            ds_meta = fred.get_series_info(id)\n            db.add_data_series_fred(ds_meta)\n            \n            ds_data = fred.get_series(id)\n            db.add_data_fred(ds_data, id)\n            logging.info('added data for %s', id)\n    \n\nif __name__ == '__main__':\n    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n    logging.basicConfig(level=logging.INFO, 
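# log_fmt above renders records as: timestamp - logger name - level - message\n        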
format=log_fmt)\n\n    # not used in this stub but often useful for finding various files\n    # project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)\n\n    # find .env automagically by walking up directories until it's found, then\n    # load up the .env entries as environment variables\n    load_dotenv(find_dotenv())\n\n    main()\n", "sub_path": "src/data/download_dataset.py", "file_name": "download_dataset.py", "file_ext": "py", "file_size_in_byte": 1639, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "fredapi.Fred", "line_number": 12, "usage_type": "call"}, {"api_name": "db_helper.DB_Helper", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "db_helper.have_data_for_series", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas_datareader.data.DataReader", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas_datareader.data", "line_number": 23, "usage_type": "attribute"}, {"api_name": "db_helper.add_data_series_and_data_yahoo", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 25, "usage_type": "call"}, {"api_name": "db_helper.have_data_for_series", "line_number": 29, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 30, "usage_type": "call"}, {"api_name": "db_helper.add_data_series_fred", "line_number": 33, "usage_type": "call"}, {"api_name": "db_helper.add_data_fred", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 42, "usage_type": "attribute"}, {"api_name": "dotenv.load_dotenv", "line_number": 49, "usage_type": "call"}, {"api_name": "dotenv.find_dotenv", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "122176984", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: M:\\\\Programming\\\\Project\\\\django-auth\\\\auth\\\\CustomAuth\\\\models\\\\finance_mixin.py\n# Compiled at: 2019-12-11 09:45:43\n# Size of source mod 2**32: 268 bytes\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nclass FinanceMixin(models.Model):\n    wallet = models.PositiveIntegerField((_('Credit of user')),\n      default=0)\n\n    class Meta:\n        abstract = True", "sub_path": "pycfiles/django_custom_user_models-0.2.5-py3-none-any/finance_mixin.cpython-37.py", "file_name": "finance_mixin.cpython-37.py", "file_ext": "py", "file_size_in_byte": 528, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.db.models.Model", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.utils.translation", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "73723274", "text": 
"import glob\nimport importlib.util\nimport logging\nimport os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nfrom falconcv.models import ApiInstaller\nfrom falconcv.util import FileUtil\n\nlogger = logging.getLogger(__name__)\n\n\nclass TFObjectDetectionAPI(ApiInstaller):\n def __init__(self):\n super(TFObjectDetectionAPI, self).__init__()\n self.repo_uri = \"https://github.com/haruiz/models.git\"\n #\"https://github.com/tensorflow/models.git\"\n self._package_name = \"object_detection\"\n\n\n def install(self):\n try:\n super(TFObjectDetectionAPI, self).install()\n self._protobuf_comp()\n research_folder = self.repo_folder.joinpath(\"research\")\n slim_folder = research_folder.joinpath(\"slim\")\n if importlib.util.find_spec(self._package_name) is None:\n logger.debug(\"Installing Api\")\n with FileUtil.workon(str(research_folder)):\n os.system(\"python setup.py build\")\n os.system(\"python setup.py install\")\n logger.debug(\"Api installation done\")\n sys.path.append(str(research_folder))\n sys.path.append(str(slim_folder))\n os.environ['PATH'] += \"{}{}{}\".format(str(research_folder), os.pathsep, str(slim_folder))\n except Exception as ex:\n logger.error(\"Error installing the package : {}\".format(ex))\n\n def _protobuf_comp(self):\n research_folder = self.repo_folder.joinpath(\"research\")\n protos_folder = research_folder.joinpath(\"object_detection\", \"protos\")\n protos_files = glob.glob(\"{}/*.proto\".format(str(protos_folder)))\n for abs_file_path in protos_files:\n file_name = Path(abs_file_path).name\n rel_file_path = \"object_detection/protos/{}\".format(file_name)\n p = subprocess.Popen(\n ['protoc', rel_file_path, \"--python_out=.\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n cwd=str(research_folder))\n output = p.stdout.readlines()\n error = p.stderr.readlines()\n if error:\n raise IOError(error)\n if output:\n logger.debug(output)\n p.wait()\n\n\nif __name__ == '__main__':\n api = TFObjectDetectionAPI()\n api.install()\n", "sub_path": "falconcv/models/tf/api_installer.py", "file_name": "api_installer.py", "file_ext": "py", "file_size_in_byte": 2388, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "falconcv.models.ApiInstaller", "line_number": 15, "usage_type": "name"}, {"api_name": "importlib.util.util.find_spec", "line_number": 29, "usage_type": "call"}, {"api_name": "importlib.util.util", "line_number": 29, "usage_type": "attribute"}, {"api_name": "importlib.util", "line_number": 29, "usage_type": "name"}, {"api_name": "falconcv.util.FileUtil.workon", "line_number": 31, "usage_type": "call"}, {"api_name": "falconcv.util.FileUtil", "line_number": 31, "usage_type": "name"}, {"api_name": "os.system", "line_number": 32, "usage_type": "call"}, {"api_name": "os.system", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.pathsep", "line_number": 37, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 44, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 46, "usage_type": 
"call"}, {"api_name": "subprocess.Popen", "line_number": 48, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 50, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "81837726", "text": "#!/usr/bin/python3\n\nimport argparse\n\nfrom .bqclient import BqClient\n\n\ndef main():\n \"\"\"\n BigQuery client test\n \"\"\"\n\n # Parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-a\", \"--auth_type\", type=str, help=\"Authentication type\", choices=['service_account'], default='service_account')\n parser.add_argument(\"-k\", \"--key\", type=str, help=\"Json private key path\", required=True)\n parser.add_argument(\"-q\", \"--query\", type=str, help=\"Query to run\", required=True)\n args = parser.parse_args()\n\n # BqClient instance\n bq = BqClient()\n\n # Authentication\n bq.setClient(args.key)\n print(\" * BigQuery client instance:\")\n print(bq.getClient())\n\n # Run query\n bq.runQuery(args.query)\n print(\" * Query instance:\")\n print(bq.getQueryJob())\n\n # Print the results.\n print(\" * Query results:\")\n for row in bq.readResult():\n print(row)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "src/bqclient_test.py", "file_name": "bqclient_test.py", "file_ext": "py", "file_size_in_byte": 955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "bqclient.BqClient", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "473517552", "text": "import galsim\nimport numpy\nimport os\n\nclass CosmosSampler(object):\n _req_params = {}\n _opt_params = { 'min_r50' : float, 'max_r50': float,\n 'min_flux' : float, 'max_flux': float,\n 'kde_factor' : float }\n _single_params = []\n _takes_rng = True\n\n def __init__(self, min_r50=0.05, max_r50=2.0, min_flux=0.5, max_flux=100,\n kde_factor=0.01, rng=None):\n # Make sure required dependencies are checked right away, so the user gets timely\n # feedback of what this code requires.\n import scipy\n import fitsio\n self.r50_range = (min_r50, max_r50)\n self.flux_range = (min_flux, max_flux)\n\n self.r50_sanity_range=0.05,2.0\n self.flux_sanity_range=0.5,100.0\n self.kde_factor=kde_factor\n\n self._load_data()\n self._make_kde()\n self._rng = galsim.BaseDeviate(rng)\n\n def sample(self, size=None):\n \"\"\"\n get [r50, flux] or [:, r50_flux]\n \"\"\"\n if size is None:\n size=1\n is_scalar=True\n else:\n is_scalar=False\n\n r50min,r50max=self.r50_range\n fmin,fmax=self.flux_range\n\n data=numpy.zeros( (size,2) )\n\n ngood=0\n nleft=data.shape[0]\n numpy.random.seed(self._rng.raw())\n while nleft > 0:\n r=self.kde.resample(size=nleft).T\n\n w,=numpy.where( (r[:,0] > r50min) &\n (r[:,0] < r50max) &\n (r[:,1] > fmin) &\n (r[:,1] < fmax)\n )\n\n if w.size > 0:\n data[ngood:ngood+w.size,:] = r[w,:]\n ngood += w.size\n nleft -= w.size\n\n if is_scalar:\n data=data[0,:]\n\n return data\n\n def _load_data(self):\n import fitsio\n fname='real_galaxy_catalog_25.2_fits.fits'\n fname=os.path.join(\n #sys.exec_prefix,\n #'share',\n #'galsim',\n galsim.meta_data.share_dir,\n 'COSMOS_25.2_training_sample',\n fname,\n )\n\n r50min,r50max=self.r50_sanity_range\n fmin,fmax=self.flux_sanity_range\n\n alldata=fitsio.read(fname, lower=True)\n w,=numpy.where(\n (alldata['viable_sersic']==1) &\n (alldata['hlr'][:,0] > r50min) &\n (alldata['hlr'][:,0] < r50max) &\n (alldata['flux'][:,0] > fmin) &\n 
(alldata['flux'][:,0] < fmax)\n )\n\n self.alldata=alldata[w]\n\n def _make_kde(self):\n import scipy.stats\n\n data=numpy.zeros( (self.alldata.size, 2) )\n data[:,0] = self.alldata['hlr'][:,0]\n data[:,1] = self.alldata['flux'][:,0]\n\n self.kde=scipy.stats.gaussian_kde(\n data.transpose(),\n bw_method=self.kde_factor,\n )\n\ndef CosmosR50Flux(config, base, name):\n # Get the current values of index_key and rng in the base dict.\n orig_index_key = base.get('index_key',None)\n orig_rng = base.get('rng',None)\n\n # This may change the values of base['index_key'] and base['rng']\n try:\n index, index_key = galsim.config.GetIndex(config, base)\n except AttributeError:\n # The old syntax prior to GalSim v1.5\n index, index_key = galsim.config.value._get_index(config, base, False)\n\n if base.get('_cosmos_sampler_index',None) != index:\n cosmos_sampler = galsim.config.GetInputObj('cosmos_sampler', config, base, name)\n r50, flux = cosmos_sampler.sample()\n base['_cosmos_sampler_r50'] = r50\n base['_cosmos_sampler_flux'] = flux\n base['_cosmos_sampler_index'] = index\n else:\n r50 = base['_cosmos_sampler_r50']\n flux = base['_cosmos_sampler_flux']\n\n # Reset these values back if necessary.\n if orig_index_key is not None:\n base['index_key'] = orig_index_key\n if orig_rng is not None:\n base['rng'] = orig_rng\n\n return float(r50), float(flux)\n\n\ndef CosmosR50(config, base, value_type):\n r50, flux = CosmosR50Flux(config,base,'CosmosR50')\n return r50, False\n\ndef CosmosFlux(config, base, value_type):\n r50, flux = CosmosR50Flux(config,base,'CosmosFlux')\n return flux, False\n\ngalsim.config.RegisterInputType('cosmos_sampler', galsim.config.InputLoader(CosmosSampler))\ngalsim.config.RegisterValueType('CosmosR50', CosmosR50, [float], input_type='cosmos_sampler')\ngalsim.config.RegisterValueType('CosmosFlux', CosmosFlux, [float], input_type='cosmos_sampler')\n\n\n", "sub_path": "galsim_extra/cosmos_sampler.py", "file_name": "cosmos_sampler.py", "file_ext": "py", "file_size_in_byte": 4502, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "galsim.BaseDeviate", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "galsim.meta_data", "line_number": 74, "usage_type": "attribute"}, {"api_name": "fitsio.read", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 96, "usage_type": "call"}, {"api_name": "scipy.stats.gaussian_kde", "line_number": 100, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 100, "usage_type": "attribute"}, {"api_name": "galsim.config.GetIndex", "line_number": 112, "usage_type": "call"}, {"api_name": "galsim.config", "line_number": 112, "usage_type": "attribute"}, {"api_name": "galsim.config.value._get_index", "line_number": 115, "usage_type": "call"}, {"api_name": "galsim.config", "line_number": 115, "usage_type": "attribute"}, {"api_name": "galsim.config.GetInputObj", "line_number": 118, "usage_type": "call"}, {"api_name": "galsim.config", "line_number": 118, 
"usage_type": "attribute"}, {"api_name": "galsim.config.RegisterInputType", "line_number": 144, "usage_type": "call"}, {"api_name": "galsim.config", "line_number": 144, "usage_type": "attribute"}, {"api_name": "galsim.config.InputLoader", "line_number": 144, "usage_type": "call"}, {"api_name": "galsim.config.RegisterValueType", "line_number": 145, "usage_type": "call"}, {"api_name": "galsim.config", "line_number": 145, "usage_type": "attribute"}, {"api_name": "galsim.config.RegisterValueType", "line_number": 146, "usage_type": "call"}, {"api_name": "galsim.config", "line_number": 146, "usage_type": "attribute"}]} +{"seq_id": "46958520", "text": "# -*- coding: utf-8 -*-\n# Copyright 2015 Objectif Libre\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# @author: Stéphane Albert\n#\nimport copy\nimport decimal\n\nimport six\n\nfrom cloudkitty import utils as ck_utils\n\nTENANT = 'f266f30b11f246b589fd266f85eeec39'\nINITIAL_TIMESTAMP = 1420070400\nFIRST_PERIOD_BEGIN = INITIAL_TIMESTAMP\nFIRST_PERIOD_BEGIN_ISO = ck_utils.ts2iso(FIRST_PERIOD_BEGIN)\nFIRST_PERIOD_END = FIRST_PERIOD_BEGIN + 3600\nFIRST_PERIOD_END_ISO = ck_utils.ts2iso(FIRST_PERIOD_END)\nSECOND_PERIOD_BEGIN = FIRST_PERIOD_END\nSECOND_PERIOD_BEGIN_ISO = ck_utils.ts2iso(SECOND_PERIOD_BEGIN)\nSECOND_PERIOD_END = SECOND_PERIOD_BEGIN + 3600\nSECOND_PERIOD_END_ISO = ck_utils.ts2iso(SECOND_PERIOD_END)\nDEMO_TENANT = '1456f30b11f2414789fd266f855ee894'\n\nCOMPUTE_METADATA = {\n 'availability_zone': 'nova',\n 'flavor': 'm1.nano',\n 'image_id': 'f5600101-8fa2-4864-899e-ebcb7ed6b568',\n 'instance_id': '26c084e1-b8f1-4cbc-a7ec-e8b356788a17',\n 'memory': '64',\n 'metadata': {\n 'farm': 'prod'\n },\n 'name': 'prod1',\n 'project_id': 'f266f30b11f246b589fd266f85eeec39',\n 'user_id': '55b3379b949243009ee96972fbf51ed1',\n 'vcpus': '1'}\n\nIMAGE_METADATA = {\n 'checksum': '836c69cbcd1dc4f225daedbab6edc7c7',\n 'container_format': 'aki',\n 'created_at': '2014-06-04T16:26:01',\n 'deleted': 'False',\n 'deleted_at': 'None',\n 'disk_format': 'aki',\n 'is_public': 'True',\n 'min_disk': '0',\n 'min_ram': '0',\n 'name': 'cirros-0.3.2-x86_64-uec-kernel',\n 'protected': 'False',\n 'size': '4969360',\n 'status': 'active',\n 'updated_at': '2014-06-04T16:26:02'}\n\nFIRST_PERIOD = {\n 'begin': FIRST_PERIOD_BEGIN,\n 'end': FIRST_PERIOD_END}\n\nSECOND_PERIOD = {\n 'begin': SECOND_PERIOD_BEGIN,\n 'end': SECOND_PERIOD_END}\n\nCOLLECTED_DATA = [{\n 'period': FIRST_PERIOD,\n 'usage': {\n 'compute': [{\n 'desc': COMPUTE_METADATA,\n 'vol': {\n 'qty': decimal.Decimal(1.0),\n 'unit': 'instance'}}],\n 'image': [{\n 'desc': IMAGE_METADATA,\n 'vol': {\n 'qty': decimal.Decimal(1.0),\n 'unit': 'image'}}]\n }}, {\n 'period': SECOND_PERIOD,\n 'usage': {\n 'compute': [{\n 'desc': COMPUTE_METADATA,\n 'vol': {\n 'qty': decimal.Decimal(1.0),\n 'unit': 'instance'}}]\n }}]\n\n# data input for invoice\n# demo user data\nINVOICE_DATA_DEMO = [{\n 'id': '1',\n 'invoice_date': FIRST_PERIOD_BEGIN,\n 'invoice_period_from': FIRST_PERIOD_BEGIN,\n 'invoice_period_to': FIRST_PERIOD_END,\n 
'invoice_id': 'demo-5-2016',\n    'total_cost': '6.64',\n    'paid_cost': '0.11',\n    'balance_cost': '6.53',\n    'invoice_data': {\n        'dict_all_cost_total': '6.64',\n        'dict_cloud_storage': '4.00',\n        'dict_volume': '2.64'},\n    'tenant_id': DEMO_TENANT,\n    'payment_status': '0',\n    'tenant_name': 'demo'\n    }]\n\n# demo user data to compare\nINVOICE_DATA_DEMO_COMPARE = [{\n    'id': 1,\n    'invoice_date': '2015-01-01T00:00:00Z',\n    'invoice_period_from': '2015-01-01T00:00:00Z',\n    'invoice_period_to': '2015-01-01T01:00:00Z',\n    'invoice_id': u'demo-5-2016',\n    'total_cost': '6.64',\n    'paid_cost': '0.11',\n    'balance_cost': '6.53',\n    'invoice_data': {\n        u'dict_all_cost_total': u'6.64',\n        u'dict_cloud_storage': u'4.00',\n        u'dict_volume': u'2.64'},\n    'tenant_id': u'1456f30b11f2414789fd266f855ee894',\n    'payment_status': 0,\n    'tenant_name': u'demo'\n    }]\n\n# data input\n# admin user data\nINVOICE_DATA_ADMIN = [{\n    'id': '2',\n    'invoice_date': FIRST_PERIOD_BEGIN,\n    'invoice_period_from': FIRST_PERIOD_BEGIN,\n    'invoice_period_to': FIRST_PERIOD_END,\n    'invoice_id': 'admin-5-2016',\n    'total_cost': '1.64',\n    'paid_cost': '0.11',\n    'balance_cost': '1.53',\n    'invoice_data': {\n        'dict_all_cost_total': '1.64',\n        'dict_cloud_storage': '0.64',\n        'dict_volume': '1.00'},\n    'tenant_id': TENANT,\n    'payment_status': '1',\n    'tenant_name': 'admin'\n    }]\n\n# admin user data to compare\nINVOICE_DATA_ADMIN_COMPARE = [{\n    'id': 2,\n    'invoice_date': '2015-01-01T00:00:00Z',\n    'invoice_period_from': '2015-01-01T00:00:00Z',\n    'invoice_period_to': '2015-01-01T01:00:00Z',\n    'invoice_id': u'admin-5-2016',\n    'total_cost': '1.64',\n    'paid_cost': '0.11',\n    'balance_cost': '1.53',\n    'invoice_data': {\n        u'dict_all_cost_total': u'1.64',\n        u'dict_cloud_storage': u'0.64',\n        u'dict_volume': u'1.00'},\n    'tenant_id': u'f266f30b11f246b589fd266f85eeec39',\n    'payment_status': 1,\n    'tenant_name': u'admin'\n    }]\n\n# all invoice data to compare\nALL_INVOICES = INVOICE_DATA_DEMO_COMPARE + INVOICE_DATA_ADMIN_COMPARE\n\n# collapse a single-entry fixture list into its one invoice dict\ndef invoice_data(data):\n\n    for invoice in data:\n\n        invoice_dict = invoice\n\n        return invoice_dict\n\n# dicts with invoice data\nINVOICE_DICT_DEMO = invoice_data(INVOICE_DATA_DEMO)\nINVOICE_DICT_ADMIN = invoice_data(INVOICE_DATA_ADMIN)\n\nRATED_DATA = copy.deepcopy(COLLECTED_DATA)\nRATED_DATA[0]['usage']['compute'][0]['rating'] = {\n    'price': decimal.Decimal('0.42')}\nRATED_DATA[0]['usage']['image'][0]['rating'] = {\n    'price': decimal.Decimal('0.1337')}\nRATED_DATA[1]['usage']['compute'][0]['rating'] = {\n    'price': decimal.Decimal('0.42')}\n\n\ndef split_storage_data(raw_data):\n\n    final_data = []\n    for frame in raw_data:\n        frame['period']['begin'] = ck_utils.ts2iso(frame['period']['begin'])\n        frame['period']['end'] = ck_utils.ts2iso(frame['period']['end'])\n        usage_buffer = frame.pop('usage')\n        # Sort to have a consistent result as we are converting it to a list\n        for service, data in sorted(six.iteritems(usage_buffer)):\n            new_frame = copy.deepcopy(frame)\n            new_frame['usage'] = {service: data}\n            new_frame['usage'][service][0]['tenant_id'] = TENANT\n            final_data.append(new_frame)\n    return final_data\n\n\n# FIXME(sheeprine): storage is not using decimal for rates, we need to\n# transition to decimal.\nSTORED_DATA = copy.deepcopy(COLLECTED_DATA)\nSTORED_DATA[0]['usage']['compute'][0]['rating'] = {\n    'price': 0.42}\nSTORED_DATA[0]['usage']['image'][0]['rating'] = {\n    'price': 0.1337}\nSTORED_DATA[1]['usage']['compute'][0]['rating'] = {\n    'price': 0.42}\n\nSTORED_DATA = split_storage_data(STORED_DATA)\n", "sub_path": 
"cloudkitty/tests/samples.py", "file_name": "samples.py", "file_ext": "py", "file_size_in_byte": 6921, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "cloudkitty.utils.ts2iso", "line_number": 28, "usage_type": "call"}, {"api_name": "cloudkitty.utils", "line_number": 28, "usage_type": "name"}, {"api_name": "cloudkitty.utils.ts2iso", "line_number": 30, "usage_type": "call"}, {"api_name": "cloudkitty.utils", "line_number": 30, "usage_type": "name"}, {"api_name": "cloudkitty.utils.ts2iso", "line_number": 32, "usage_type": "call"}, {"api_name": "cloudkitty.utils", "line_number": 32, "usage_type": "name"}, {"api_name": "cloudkitty.utils.ts2iso", "line_number": 34, "usage_type": "call"}, {"api_name": "cloudkitty.utils", "line_number": 34, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 81, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 86, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 94, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 192, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 194, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 196, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 198, "usage_type": "call"}, {"api_name": "cloudkitty.utils.ts2iso", "line_number": 205, "usage_type": "call"}, {"api_name": "cloudkitty.utils", "line_number": 205, "usage_type": "name"}, {"api_name": "cloudkitty.utils.ts2iso", "line_number": 206, "usage_type": "call"}, {"api_name": "cloudkitty.utils", "line_number": 206, "usage_type": "name"}, {"api_name": "six.iteritems", "line_number": 209, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 210, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 219, "usage_type": "call"}]} +{"seq_id": "135032621", "text": "import subprocess\r\nimport youtube_dl\r\nimport os\r\nimport eyed3\r\n\r\ndef get_audio(url, artist, title):\r\n root_dir = \"C:\\\\Users\\\\robfa\\\\Documents\\\\Python\\\\Youtube Thing\"\r\n temp_dir = root_dir + \"\\\\Temp\"\r\n spotify_dir = \"C:\\\\Users\\\\robfa\\\\Music\\\\From YouTube\"\r\n options = {\r\n 'format': 'bestaudio/best', # choice of quality\r\n 'extractaudio': True, # only keep the audio\r\n 'noplaylist': True, # only download single song, not playlist\r\n }\r\n\r\n temp_contents = os.listdir(temp_dir)\r\n\r\n os.chdir(temp_dir)\r\n with youtube_dl.YoutubeDL(options) as ydl:\r\n ydl.download(url)\r\n os.chdir(root_dir)\r\n\r\n new_temp_contents = os.listdir(temp_dir)\r\n\r\n for x in temp_contents:\r\n if x in new_temp_contents:\r\n new_temp_contents.remove(x)\r\n\r\n for x in new_temp_contents:\r\n file_name, file_extension = os.path.splitext(x)\r\n downloaded_file_path = temp_dir + \"\\\\\" + x\r\n processed_file_path = temp_dir + \"\\\\\" + file_name + \".mp3\"\r\n #subprocess.run([\"cmd.bat\", downloaded_file_path, processed_file_path])\r\n subprocess.run([\"C:\\\\Program Files\\\\ffmpeg\\\\bin\\\\ffmpeg.exe\", \"-y\", \"-i\", downloaded_file_path, \"-acodec\", \"libmp3lame\", \"-ab\", \"128k\", processed_file_path])\r\n #Use of ffmpeg path will be irritating - can we pick this up dynamically (environment variables?)\r\n #\"C:\\Program Files\\ffmpeg\\bin\\ffmpeg.exe\" -y -i %1 -acodec libmp3lame -ab 128k %2\r\n #\r\n\r\n final_file_path = spotify_dir + \"\\\\\" + file_name + \".mp3\"\r\n\r\n tag_file = eyed3.load(processed_file_path)\r\n tag_file.tag.artist = 
artist\r\n tag_file.tag.title = title\r\n tag_file.tag.album = title\r\n tag_file.tag.save()\r\n\r\n try:\r\n os.rename(processed_file_path, final_file_path)\r\n except Exception as e:\r\n raise e\r\n\r\n try:\r\n os.remove(downloaded_file_path)\r\n except Exception as e:\r\n raise e\r\n\r\n\r\ndef main():\r\n\r\n get_audio(['https://www.youtube.com/watch?v=RPxvTd_jCPQ'], \"Young Scrolls\", \"Sheogorath - Zoom\")\r\n #with open(\"C:\\\\Users\\\\robfa\\\\Desktop\\\\BookMarks.html\", \"r\") as file:\r\n # for line in file:\r\n # try:\r\n # get_audio([line], '', '')\r\n # except:\r\n # pass\r\n ##get_audio(['https://www.youtube.com/watch?v=CsvhTfv-_Sw'], 'Dmitry Glushkov', 'Gimme Gimme Gimme')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n", "sub_path": "youtube_converter.py", "file_name": "youtube_converter.py", "file_ext": "py", "file_size_in_byte": 2465, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 18, "usage_type": "call"}, {"api_name": "youtube_dl.YoutubeDL", "line_number": 19, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 21, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 34, "usage_type": "call"}, {"api_name": "eyed3.load", "line_number": 41, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 48, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "454394548", "text": "import torch\nimport torch.nn.functional as F\nimport numpy as np\nimport utils\nimport config\nfrom model import MyAlexNet, MyAlexNetCAM\nfrom PIL import Image\nfrom dataloader import transform\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\nimport os\nimport time\n\ndef get_best_model(return_weights=False):\n params = utils.load_params()\n model = MyAlexNet(params).to(device=params.device)\n checkpoint = os.path.join(config.model_dir, 'last.pth.tar')\n utils.load_checkpoint(checkpoint, model, params)\n if return_weights:\n weights = {\n \"conv1\":model.conv1[0].weight.data,\n \"conv2\":model.conv2[0].weight.data,\n \"conv3\":model.conv3[0].weight.data,\n \"conv4\":model.conv4[0].weight.data,\n \"conv5\":model.conv5[0].weight.data,\n }\n return weights\n return model, params\n\ndef predict(X, y=None):\n \"\"\"\n Args:\n X (list): a list of chest x-rays in numpy array. 
(h, w, 1)\n y (list): a list of labels (0 or 1)\n \"\"\"\n model, params = get_best_model()\n model.eval()\n with torch.no_grad():\n X = utils.preprocess(X)\n output = model(X)\n prob = F.softmax(output, dim=1)\n\n prob = prob.data.cpu().numpy()\n conv1 = model.conv1_out.cpu().numpy()\n conv2 = model.conv2_out.cpu().numpy()\n conv3 = model.conv3_out.cpu().numpy()\n conv4 = model.conv4_out.cpu().numpy()\n conv5 = model.conv5_out.cpu().numpy()\n fc1 = model.fc1_out.cpu()\n fc2 = model.fc2_out.cpu()\n\n y_pred = np.argmax(prob, axis = 1)\n prob = prob[:, 1]\n activation = {\n \"conv1\": conv1,\n \"conv2\": conv2,\n \"conv3\": conv3,\n \"conv4\": conv4,\n \"conv5\": conv5,\n \"fc1\": fc1,\n \"fc2\": fc2\n }\n\n if y is not None:\n print(\"Accuracy:\", np.mean(y == y_pred))\n return y_pred, prob, activation\n\ndef predict_CAM(X, y=None):\n \"\"\"\n Args:\n X (list): a list of chest x-rays in numpy array. (h, w, 1)\n y (list): a list of labels (0 or 1)\n \"\"\"\n params = utils.load_params()\n model = MyAlexNetCAM(params).to(device=params.device)\n checkpoint = os.path.join(config.cam_model_dir, 'last.pth.tar')\n utils.load_checkpoint(checkpoint, model, params)\n model.eval()\n with torch.no_grad():\n X = utils.preprocess(X)\n output = model(X)\n prob = F.softmax(output, dim=1)\n\n prob = prob.data.cpu().numpy()\n conv5 = model.conv5_out.cpu().numpy()\n weights = model.fc.weight.detach().numpy()\n\n y_pred = np.argmax(prob, axis = 1)\n w = weights[y_pred].reshape(1, -1, 1, 1)\n cam = np.sum((conv5 * w), axis=1)\n\n # conv5 = conv5[0]\n # cam = np.zeros(shape=conv5.shape[1:3])\n # weight = weights[y_pred, :].reshape()\n # for i, w in enumerate(weight):\n # cam += w * conv5[i, :, :]\n\n if y is not None:\n print(\"Accuracy:\", np.mean(y == y_pred))\n return cam\n", "sub_path": "predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 3019, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "utils.load_params", "line_number": 15, "usage_type": "call"}, {"api_name": "model.MyAlexNet", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "config.model_dir", "line_number": 17, "usage_type": "attribute"}, {"api_name": "utils.load_checkpoint", "line_number": 18, "usage_type": "call"}, {"api_name": "model.conv1", "line_number": 21, "usage_type": "attribute"}, {"api_name": "model.conv2", "line_number": 22, "usage_type": "attribute"}, {"api_name": "model.conv3", "line_number": 23, "usage_type": "attribute"}, {"api_name": "model.conv4", "line_number": 24, "usage_type": "attribute"}, {"api_name": "model.conv5", "line_number": 25, "usage_type": "attribute"}, {"api_name": "model.eval", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.preprocess", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 41, "usage_type": "name"}, {"api_name": "model.conv1_out.cpu", "line_number": 44, "usage_type": "call"}, {"api_name": "model.conv1_out", "line_number": 44, "usage_type": "attribute"}, {"api_name": "model.conv2_out.cpu", "line_number": 45, "usage_type": "call"}, {"api_name": "model.conv2_out", "line_number": 45, "usage_type": "attribute"}, {"api_name": "model.conv3_out.cpu", 
"line_number": 46, "usage_type": "call"}, {"api_name": "model.conv3_out", "line_number": 46, "usage_type": "attribute"}, {"api_name": "model.conv4_out.cpu", "line_number": 47, "usage_type": "call"}, {"api_name": "model.conv4_out", "line_number": 47, "usage_type": "attribute"}, {"api_name": "model.conv5_out.cpu", "line_number": 48, "usage_type": "call"}, {"api_name": "model.conv5_out", "line_number": 48, "usage_type": "attribute"}, {"api_name": "model.fc1_out.cpu", "line_number": 49, "usage_type": "call"}, {"api_name": "model.fc1_out", "line_number": 49, "usage_type": "attribute"}, {"api_name": "model.fc2_out.cpu", "line_number": 50, "usage_type": "call"}, {"api_name": "model.fc2_out", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 65, "usage_type": "call"}, {"api_name": "utils.load_params", "line_number": 74, "usage_type": "call"}, {"api_name": "model.MyAlexNetCAM", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "config.cam_model_dir", "line_number": 76, "usage_type": "attribute"}, {"api_name": "utils.load_checkpoint", "line_number": 77, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.preprocess", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 82, "usage_type": "name"}, {"api_name": "model.conv5_out.cpu", "line_number": 85, "usage_type": "call"}, {"api_name": "model.conv5_out", "line_number": 85, "usage_type": "attribute"}, {"api_name": "model.fc.weight.detach", "line_number": 86, "usage_type": "call"}, {"api_name": "model.fc", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "6254751", "text": "##this goes on pynq board with flask and its a client\r\nfrom flask import Flask\r\nimport socket # Import socket module\r\nimport subprocess\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n\r\n##\r\n s = socket.socket() # Create a socket object\r\n host = socket.gethostname() # Get local machine name\r\n port = 60000 # Reserve a port for your service.\r\n\r\n s.connect(('172.20.10.6', port))\r\n #s.send('ello server')\r\n data=\"I am CLIENT\\n\"\r\n s.send(data.encode())\r\n with open('/home/xilinx/webapp/input_img/received_file', 'wb') as f:\r\n print ('file opened')\r\n while True:\r\n # print('receiving data...')\r\n data = s.recv(1024)\r\n #print('data=%s', (data))\r\n if not data:\r\n break\r\n # write data to a file\r\n f.write(data)\r\n\r\n f.close()\r\n print('Successfully get the file')\r\n s.close()\r\n print('connection closed')\r\n print('before yolo execution')\r\n returned_output = subprocess.check_output('sudo python3 /home/xilinx/jupyter_notebooks/qnn/tiny-yolo-client-server.py', shell=True)\r\n #print('returned_output is: ', returned_output)\r\n #print('Current date is:', returned_output.decode(\"utf-8\"))\r\n\r\n print('before printing res')\r\n\r\n receive_port = 8900\r\n s.connect(('172.20.10.6', port))\r\n filename= 
'/home/xilinx/jupyter_notebooks/qnn/probabilities.txt'\r\n    f = open(filename,'rb')\r\n    ll = f.read(1024)\r\n    while (ll):\r\n       s.send(ll)\r\n       print('sent')\r\n       ll = f.read(1024)\r\n    f.close()\r\n    print('Done sending')\r\n    s.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True, port=9000, host='0.0.0.0')\r\n", "sub_path": "flask_client.py", "file_name": "flask_client.py", "file_ext": "py", "file_size_in_byte": 1753, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 11, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 12, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "186179767", "text": "import os\r\nimport chardet\r\nimport time\r\nimport pandas\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#Get the file size in kilobytes\r\ndef size_ko (path) :\r\n    file_size_ko=os.path.getsize(path)/1024 #conversion bytes / kilobytes\r\n    string_file_size_ko = str(file_size_ko)\r\n    print(string_file_size_ko + \"Ko\")\r\n\r\n\r\n#Find the encoding of a file\r\ndef encoding (path):\r\n    file = open(path, 'rb').read()\r\n    result = chardet.detect(file)\r\n    print(\"Encoding: \" + result['encoding'] )\r\n    \r\n#Last modification date of the file\r\ndef date_last_modif (path):\r\n    print (time.ctime(os.path.getctime(path)))\r\n\r\n#Load the data\r\ndef chargement(path) :\r\n    try:\r\n        dataframe = pandas.read_csv(path, sep =',|\",\"', quotechar='\"', index_col = 1, engine = 'python')\r\n    except Exception as e:\r\n        print(e)\r\n    return dataframe, dataframe.shape\r\n\r\n#Write the first CSV into another one with a different format\r\ndef reloading_csv(path):\r\n    try:\r\n        dataframe = pandas.read_csv(path, sep =',|\",\"', quotechar='\"', engine ='python')\r\n        dataframe.to_csv(path)\r\n        print(\"Copy finished\")\r\n    except Exception as e:\r\n        print(e)\r\n\r\n#Return the number of columns and rows\r\ndef rows_columns (path) :\r\n    try:\r\n        dataframe = pandas.read_csv(path, sep =',|\",\"', quotechar='\"', index_col = 1, engine = 'python')\r\n        lignes, colonnes = dataframe.shape\r\n        print (\"There are \" + str(colonnes) + \" columns and \" + str(lignes) + \" rows\")\r\n    except Exception as e:\r\n        print(e)\r\n\r\n#Return the max, min, avg of sepal_length\r\ndef info_sepal_length (path):\r\n    try:\r\n        dataframe = pandas.read_csv(path, sep =',|\",\"', quotechar='\"', index_col = 0, engine = 'python')\r\n        \r\n        #min_sepal_length = dataframe['Sepal.Length'].min()\r\n        #avg_sepal_length = dataframe['Sepal.Length'].mean()\r\n        #print('Max Sepal.Length ' + str(max_sepal_length))\r\n        #print('Min Sepal.Length ' + str(min_sepal_length))\r\n        #print('Average Sepal.Length ' + str(avg_sepal_length))\r\n        \r\n    except Exception as e:\r\n        print(e)\r\n\r\n    max_sepal_length = dataframe['Sepal.Length'].max()\r\n\r\n#return the number of columns\r\ndef nombre_colonnes (path):\r\n    try:\r\n        dataframe = pandas.read_csv(path, sep =',|\",\"', quotechar='\"', index_col = 0, engine = 'python', header = 0 )\r\n    except Exception as e:\r\n        print(e)\r\n    print(\"There are \"+str(len(dataframe.select_dtypes(include=['float64']).columns))+\" quantitative variables: \" + str((dataframe.select_dtypes(include=['float64']).columns)) )\r\n    print(\"There are \"+str(len(dataframe.select_dtypes(exclude=['float64']).columns))+\" qualitative variables: \" + str((dataframe.select_dtypes(exclude=['float64']).columns)) )\r\n    #+len(dataframe.select_dtypes(include=['str']).columns))\r\n    #print(\"There are 
\"+str(len(dataframe.select_dtypes(exclude=['str']).columns))+\" variables quantitatives\")\n #return \"Il y a \"+ str(len(dataframe.columns))+\" variables et il y a \" +str(len(dataframe.columns)*len(dataframe.index)) + \" valeurs\"\n #return dataframe.dtypes\n print(\"Voici les max : \" + str(dataframe.max()))\n print(\"Voici les min : \" + str(dataframe.min()))\n print(\"Voici les moyennes : \" + str(dataframe.mean()))\n print(\"Voici les médianes : \" + str(dataframe.med()))\n\n#Retourne un histogramme variables quantitatives\ndef hist_figure_quantitative (path) :\n try:\n dataframe = pandas.read_csv(path, sep =',|\",\"', quotechar='\"', index_col = 0, engine = 'python', header = 0 )\n except (e) :\n print(e)\n hist = dataframe.hist()\n plt.savefig(path+'figure.pdf')\n\n#Retourne la fréquence\ndef hist_figure_qual (path) :\n try:\n dataframe = pandas.read_csv(path, sep =',|\",\"', quotechar='\"', index_col = 0, engine = 'python', header = 0 )\n except (e) :\n print(e)\n counts = dataframe.count()\n return counts\n\n", "sub_path": "get_file_size.py", "file_name": "get_file_size.py", "file_ext": "py", "file_size_in_byte": 3784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.getsize", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "chardet.detect", "line_number": 18, "usage_type": "call"}, {"api_name": "time.ctime", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.getctime", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "64804330", "text": "\"\"\"\\\nImplementation of Cohesive Markers hypothesis.\n\nOrigin: On the Features of Translationese, VV, NO & SW\n 4.2 Explicitation, Cohesive Markers\n\"\"\"\n\nimport nltk\nfrom translationese.utils import sparse_dict_increment\n\nCOHESIVE_MARKERS = [\"as for\",\n\"as to\",\n\"because\",\n\"besides\",\n\"but\",\n\"consequently\",\n\"despite\",\n\"even if\",\n\"even though\",\n\"except\",\n\"further\",\n\"furthermore\",\n\"hence\",\n\"however\",\n\"in addition\",\n\"in conclusion\",\n\"in other words\",\n\"in spite\",\n\"instead\",\n\"is to say\",\n\"maybe\",\n\"moreover\",\n\"nevertheless\",\n\"on account of\",\n\"on the contrary\",\n\"on the other hand\",\n\"otherwise\",\n\"referring to\",\n\"since\",\n\"so\",\n\"the former\",\n\"the latter\",\n\"therefore\",\n\"this implies\",\n\"though\",\n\"thus\",\n\"with reference to\",\n\"with regard to\",\n\"yet\",\n\"concerning\"]\n\ndef quantify(analysis):\n result = {}\n \n tokenized_markers = [(marker,nltk.word_tokenize(marker)) for marker in COHESIVE_MARKERS]\n text = analysis.tokens()\n \n for i, _ in enumerate(text):\n for (marker,tokenized) in tokenized_markers:\n if (tokenized == 
text[i:i+len(tokenized)]):\n sparse_dict_increment(result, marker)\n\n pairs = [ (marker, float(result[marker]) / len(text)) for marker in result.keys()]\n \n return dict(pairs)\n", "sub_path": "translationese/cohesive_markers.py", "file_name": "cohesive_markers.py", "file_ext": "py", "file_size_in_byte": 1246, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "nltk.word_tokenize", "line_number": 55, "usage_type": "call"}, {"api_name": "translationese.utils.sparse_dict_increment", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "297278525", "text": "\"\"\"remove not null constraint on item.description\n\nRevision ID: 72e301ad1550\nRevises: 6ff1853017d1\nCreate Date: 2020-02-07 21:26:43.169870\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '72e301ad1550'\ndown_revision = '6ff1853017d1'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('appuser', 'home',\n type_=sa.String(),\n nullable=True)\n op.alter_column('item', 'description',\n existing_type=sa.TEXT(),\n nullable=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('appuser', 'home',\n type_=sa.INTEGER(),\n nullable=True)\n op.alter_column('item', 'description',\n existing_type=sa.TEXT(),\n nullable=False)\n # ### end Alembic commands ###\n", "sub_path": "pryce/database/migrations/versions/72e301ad1550_remove_not_null_constraint_on_item_.py", "file_name": "72e301ad1550_remove_not_null_constraint_on_item_.py", "file_ext": "py", "file_size_in_byte": 1012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "alembic.op.alter_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.String", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op.alter_column", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlalchemy.TEXT", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op.alter_column", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op.alter_column", "line_number": 35, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 35, "usage_type": "name"}, {"api_name": "sqlalchemy.TEXT", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "298294730", "text": "import sqlite3\nimport names\nfrom random import randint, uniform\nimport uuid\n\n\ndef add_place_status_to_db(conn, table, place_id, spaceship_id):\n conn.execute('INSERT INTO ' + table + ' (place_id, space_ship_id) VALUES (' + str(place_id) + ', ' + str(spaceship_id) + ');')\n\n\nconn = sqlite3.connect('StarWars.db')\n\n# arrives_to\nfor i in range(1, 1001):\n place_id = randint(1, 1001)\n add_place_status_to_db(conn, 'arrives_to', place_id, i)\n\n# departs_from\nfor i in range(1, 1001):\n place_id = randint(1, 1001)\n add_place_status_to_db(conn, 'departs_from', place_id, i)\n\n# is_in\nfor i in range(1, 1001):\n place_id = randint(1, 1001)\n add_place_status_to_db(conn, 'is_in', place_id, 
i)\n\nconn.commit()\nconn.close()\n\n\n", "sub_path": "fill_arrives_departs_in.py", "file_name": "fill_arrives_departs_in.py", "file_ext": "py", "file_size_in_byte": 731, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sqlite3.connect", "line_number": 11, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 15, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 20, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "166516700", "text": "from __future__ import (absolute_import, division, print_function,\r\n unicode_literals)\r\n\r\nimport argparse\r\n\r\nimport backtrader as bt\r\n#import backtrader.feeds as btfeeds\r\n\r\nimport pandas\r\n\r\n\"\"\"\r\n# Create a Stratey\r\nclass TestStrategy(bt.Strategy):\r\n\r\n def log(self, txt, dt=None):\r\n ''' Logging function fot this strategy'''\r\n dt = dt or self.datas[0].datetime.date(0)\r\n print('%s, %s' % (dt.isoformat(), txt))\r\n\r\n def __init__(self):\r\n # Keep a reference to the \"close\" line in the data[0] dataseries\r\n self.dataclose = self.datas[0].close\r\n\r\n def next(self):\r\n # Simply log the closing price of the series from the reference\r\n self.log('Close, %.2f' % self.dataclose[0])\r\n\"\"\"\r\nclass TestStrategy(bt.Strategy):\r\n \"\"\"\r\n params = (\r\n ('stake', 4000),\r\n ('exitbars', 4),\r\n )\r\n \"\"\"\r\n params = (\r\n ('s_maperiod', 5),\r\n ('l_maperiod', 10),\r\n ('stake', 100),\r\n )\r\n\r\n def log(self, txt, dt=None):\r\n ''' Logging function fot this strategy'''\r\n dt = dt or self.datas[0].datetime.date(0)\r\n print('%s, %s' % (dt.isoformat(), txt))\r\n\r\n def __init__(self):\r\n # Keep a reference to the \"close\" line in the data[0] dataseries\r\n self.dataclose = self.datas[0].close\r\n \r\n # Set the sizer stake from the params\r\n self.sizer.setsizing(self.params.stake)\r\n\r\n # To keep track of pending orders and buy price/commission\r\n self.order = None\r\n self.buyprice = None\r\n self.buycomm = None\r\n \r\n # Add a MovingAverageSimple indicator\r\n self.s_sma = bt.indicators.SimpleMovingAverage(\r\n self.datas[0], period=self.params.s_maperiod)\r\n self.l_sma = bt.indicators.SimpleMovingAverage(\r\n self.datas[0], period=self.params.l_maperiod)\r\n \r\n # Indicators for the plotting show\r\n bt.indicators.ExponentialMovingAverage(self.datas[0], period=25)\r\n bt.indicators.WeightedMovingAverage(self.datas[0], period=25,\r\n subplot=True)\r\n bt.indicators.StochasticSlow(self.datas[0])\r\n bt.indicators.MACDHisto(self.datas[0])\r\n rsi = bt.indicators.RSI(self.datas[0])\r\n bt.indicators.SmoothedMovingAverage(rsi, period=10)\r\n bt.indicators.ATR(self.datas[0], subplot=True)#plot=False)\r\n\r\n def notify_order(self, order):\r\n if order.status in [order.Submitted, order.Accepted]:\r\n # Buy/Sell order submitted/accepted to/by broker - Nothing to do\r\n return\r\n\r\n # Check if an order has been completed\r\n # Attention: broker could reject order if not enougth cash\r\n if order.status in [order.Completed, order.Canceled, order.Margin]:\r\n if order.isbuy():\r\n self.log(\r\n 'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %\r\n (order.executed.price,\r\n order.executed.value,\r\n order.executed.comm))\r\n\r\n self.buyprice = order.executed.price\r\n self.buycomm = order.executed.comm\r\n else: # Sell\r\n self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %\r\n (order.executed.price,\r\n order.executed.value,\r\n 
order.executed.comm))\r\n\r\n self.bar_executed = len(self)\r\n\r\n # Write down: no pending order\r\n self.order = None\r\n\r\n def notify_trade(self, trade):\r\n if not trade.isclosed:\r\n return\r\n\r\n self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %\r\n (trade.pnl, trade.pnlcomm))\r\n\r\n def next(self):\r\n # Simply log the closing price of the series from the reference\r\n self.log('Close, %.2f' % self.dataclose[0])\r\n\r\n # Check if an order is pending ... if yes, we cannot send a 2nd one\r\n if self.order:\r\n return\r\n\r\n # Check if we are in the market\r\n if not self.position:\r\n \"\"\"\r\n # Not yet ... we MIGHT BUY if ...\r\n if self.dataclose[0] > 1.01*self.dataclose[-1]: #and self.dataclose[0] > 1.03*self.dataclose[-1]:\r\n # current close less than previous close\r\n\r\n if self.dataclose[-1] > 1.01*self.dataclose[-2]:\r\n \"\"\"\r\n if self.dataclose[0] > self.s_sma[0]:\r\n # previous close less than the previous close\r\n\r\n # BUY, BUY, BUY!!! (with default parameters)\r\n self.log('BUY CREATE, %.2f' % self.dataclose[0])\r\n\r\n # Keep track of the created order to avoid a 2nd order\r\n self.order = self.buy()\r\n\r\n else:\r\n\r\n # Already in the market ... we might sell\r\n #if len(self) >= (self.bar_executed + self.params.exitbars):\r\n if self.dataclose[0] < self.l_sma[0]:\r\n # SELL, SELL, SELL!!! (with all possible default parameters)\r\n self.log('SELL CREATE, %.2f' % self.dataclose[0])\r\n\r\n # Keep track of the created order to avoid a 2nd order\r\n self.order = self.sell()\r\ndef runstrat():\r\n args = parse_args()\r\n\r\n # Create a cerebro entity\r\n cerebro = bt.Cerebro(stdstats=False)\r\n\r\n # Add a strategy\r\n cerebro.addstrategy(TestStrategy)\r\n\r\n # Get a pandas dataframe\r\n datapath = ('E:/work/stockAnalyze/update/002678.csv')\r\n\r\n # Simulate the header row isn't there if noheaders requested\r\n skiprows = 1 if args.noheaders else 0\r\n header = None if args.noheaders else 0\r\n column_list=['Date','Open','High','Low','Close','Volume']\r\n dataframe = pandas.read_csv(datapath,\r\n skiprows=skiprows,\r\n header=header,\r\n parse_dates=True,\r\n index_col=0,\r\n names=column_list)\r\n #dataframe.index.name='date'\r\n \r\n if not args.noprint:\r\n print('--------------------------------------------------')\r\n print(dataframe)\r\n print('--------------------------------------------------')\r\n \r\n dataframe[str('Openinterest')]=0.0\r\n print (\"dataframe with openinterest column:\")\r\n print(dataframe)\r\n\r\n # Pass it to the backtrader datafeed and add it to the cerebro\r\n data = bt.feeds.PandasData(dataname=dataframe)\r\n \r\n\r\n cerebro.adddata(data)\r\n \r\n # Set our desired cash start\r\n cerebro.broker.setcash(100000.0)\r\n\r\n # Set the commission - 0.1% ... 
divide by 100 to remove the % (note: commission=0.0025 is 0.25%)\r\n    cerebro.broker.setcommission(commission=0.0025)\r\n\r\n    # Print out the starting conditions\r\n    print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())\r\n\r\n\r\n    # Run over everything\r\n    cerebro.run()\r\n    # Print out the final result\r\n    print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())\r\n    # Plot the result\r\n    cerebro.plot(style='bar')\r\n\r\n\r\ndef parse_args():\r\n    parser = argparse.ArgumentParser(\r\n        description='Pandas test script')\r\n\r\n    parser.add_argument('--noheaders', action='store_true', default=False,\r\n                        required=False,\r\n                        help='Do not use header rows')\r\n\r\n    parser.add_argument('--noprint', action='store_true', default=False,\r\n                        help='Do not print the dataframe')\r\n\r\n    return parser.parse_args()\r\n\r\n\r\nif __name__ == '__main__':\r\n    runstrat()", "sub_path": "back_trader.py", "file_name": "back_trader.py", "file_ext": "py", "file_size_in_byte": 7577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "backtrader.Strategy", "line_number": 28, "usage_type": "attribute"}, {"api_name": "backtrader.indicators.SimpleMovingAverage", "line_number": 59, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 59, "usage_type": "attribute"}, {"api_name": "backtrader.indicators.SimpleMovingAverage", "line_number": 61, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 61, "usage_type": "attribute"}, {"api_name": "backtrader.indicators.ExponentialMovingAverage", "line_number": 65, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 65, "usage_type": "attribute"}, {"api_name": "backtrader.indicators.WeightedMovingAverage", "line_number": 66, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 66, "usage_type": "attribute"}, {"api_name": "backtrader.indicators.StochasticSlow", "line_number": 68, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 68, "usage_type": "attribute"}, {"api_name": "backtrader.indicators.MACDHisto", "line_number": 69, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 69, "usage_type": "attribute"}, {"api_name": "backtrader.indicators.RSI", "line_number": 70, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 70, "usage_type": "attribute"}, {"api_name": "backtrader.indicators.SmoothedMovingAverage", "line_number": 71, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 71, "usage_type": "attribute"}, {"api_name": "backtrader.indicators.ATR", "line_number": 72, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 72, "usage_type": "attribute"}, {"api_name": "backtrader.Cerebro", "line_number": 149, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 161, "usage_type": "call"}, {"api_name": "backtrader.feeds.PandasData", "line_number": 179, "usage_type": "call"}, {"api_name": "backtrader.feeds", "line_number": 179, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 203, "usage_type": "call"}]} +{"seq_id": "524671370", "text": "# pip install gensim==3.8.1 --user\r\n\r\nimport pickle\r\nfrom gensim.models.wrappers import LdaMallet\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport shutil\r\nimport datetime\r\nfrom topics import run as save_topic_list, save_topics_with_samples\r\nfrom lda_utils import get_docs_topics\r\n\r\ndef 
display_single_lda_topic(lda: LdaMallet, topic: int):\r\n topic = \", \".join([x[0] for x in lda.show_topic(topic)])\r\n\r\n return topic\r\n\r\ndef display_lda_topics(lda: LdaMallet):\r\n num_topics = lda.num_topics\r\n topics = [\r\n (i,\r\n \", \".join([x[0] for x in lda.show_topic(i)])\r\n )\r\n for i in range(num_topics)\r\n ]\r\n return topics\r\n\r\n\r\ndef save_frequency_plot(df: pd.DataFrame, title: str, filename: str) -> None:\r\n plot = df.plot(marker='o', linestyle='-', grid=True, figsize=(17,7))\r\n\r\n plot.set_title(title)\r\n\r\n xticks = df.index.to_list()\r\n plot.set_xticks(xticks)\r\n # yticks = df_grouped.values.tolist()\r\n # plot.set_yticks(yticks)\r\n\r\n plot.xaxis.set_tick_params(rotation=45)\r\n plot.patch.set_facecolor('white')\r\n\r\n fig = plot.get_figure()\r\n # fig.autofmt_xdate(bottom=0.2, rotation=90, ha='right')\r\n\r\n fig.savefig(filename)\r\n fig.clf()\r\n\r\n\r\ndef generate_topic_plot(\r\n speeches_all_preproc,\r\n speeches_topics,\r\n topic,\r\n outdir: str,\r\n lda: LdaMallet,\r\n):\r\n\r\n filtered_speeches = [\r\n (\r\n id,\r\n date,\r\n topics\r\n )\r\n for (id, date, topics) in speeches_topics\r\n if topic in topics\r\n ]\r\n\r\n # all speeches\r\n df_year_all = pd.DataFrame([\r\n (i, d, None)\r\n for (i, d, t) in speeches_topics\r\n ], columns=[\"id\", \"date\", \"topics\"])\r\n df_year_all['date'] = pd.to_datetime(df_year_all['date'])\r\n df_year_all['year'] = df_year_all['date'].dt.year\r\n df_year_all_grouped = df_year_all[['year','id']].groupby('year').count()\r\n\r\n\r\n # generate frequency per year for all topics\r\n if topic == 0:\r\n save_frequency_plot(\r\n df=df_year_all_grouped,\r\n title=\"Absolute speech frequency\",\r\n filename=\"{}/frequency_all.png\".format(outdir)\r\n )\r\n # save_topic_list(lda, outdir, \"\")\r\n save_topics_with_samples(lda, speeches_all_preproc, outdir)\r\n\r\n # filtered\r\n df = pd.DataFrame(filtered_speeches, columns=[\"id\", \"date\", \"topics\"])\r\n df['date'] = pd.to_datetime(df['date'])\r\n df['year'] = df['date'].dt.year\r\n\r\n df_grouped = df[['year','id']].groupby('year').count()\r\n df_grouped[\"Total\"] = df_year_all[['year','id']].groupby('year').count()\r\n df_grouped['Percentage'] = df_grouped['id'] * 100 /df_grouped['Total']\r\n\r\n df_grouped = df_grouped['Percentage']\r\n\r\n # df.info()\r\n\r\n save_frequency_plot(\r\n df=df_grouped,\r\n title=\"Relative speech frequency - topic {}\\n{}\".format(topic, display_single_lda_topic(lda, topic)),\r\n filename=\"{}/topic_{}.png\".format(outdir, topic)\r\n )\r\n\r\ndef run(model: str, speeches_all_preproc: str, outdir: str, topic_threshold: int) -> None:\r\n\r\n os.chdir(\"X:/Victor/Documents/TCC/new_discursos/src\")\r\n\r\n lda = LdaMallet.load(model)\r\n lda.mallet_path = \"lib\\\\mallet-2.0.8\\\\bin\\\\mallet\"\r\n\r\n os.environ['MALLET_HOME'] = 'X:\\\\Programs\\\\Java\\\\mallet\\\\mallet-2.0.8'\r\n\r\n docs_topics = get_docs_topics(lda)\r\n\r\n all_speeches = pickle.load(open(speeches_all_preproc, \"rb\"))\r\n\r\n dates = [\r\n (i, s[\"date\"])\r\n for i, s in enumerate(all_speeches)\r\n ]\r\n\r\n sorted_dates = sorted(\r\n dates,\r\n key=lambda x: x[1]\r\n )\r\n\r\n speeches_topics = [\r\n (id, date, docs_topics[id])\r\n for (id, date) in sorted_dates\r\n ]\r\n\r\n speeches_top_topics = [\r\n (\r\n id,\r\n date,\r\n sorted(topics, key=lambda x: x[1], reverse=True)[0:topic_threshold] # top topics\r\n )\r\n for (id, date, topics) in speeches_topics\r\n ]\r\n\r\n # [(32151, '1980-12-05', [21, 25, 88]),\r\n # (39526, '1988-03-15', [78, 8, 
87]),\r\n    # (13169, '1994-01-03', [60, 52, 7]),\r\n    # (51924, '1994-01-03', [27, 70, 81]),\r\n    speeches_topic_list = [\r\n        (\r\n            id,\r\n            date,\r\n            [topic[0] for topic in topics]\r\n        )\r\n        for (id, date, topics) in speeches_top_topics\r\n    ]\r\n\r\n    for topic in range(0, lda.num_topics):\r\n        generate_topic_plot(\r\n            speeches_all_preproc=all_speeches,\r\n            speeches_topics=speeches_topic_list,\r\n            topic=topic,\r\n            outdir=outdir,\r\n            lda=lda\r\n        )\r\n\r\n\r\nif __name__ == '__main__':\r\n    model_name = \"20_it_10000_NDL\"\r\n    model = \"data/models/{}/model\".format(model_name)\r\n    speeches_all_preproc = \"data/speeches_all_preproc_filtered_NDL.pickle\"\r\n    outdir = \"results/time_topics_{}\".format(model_name)\r\n    topic_threshold = 3 # top 3 topics\r\n\r\n    if os.path.exists(outdir):\r\n        shutil.rmtree(outdir)\r\n    os.makedirs(outdir)\r\n\r\n    run(model, speeches_all_preproc, outdir, topic_threshold)", "sub_path": "src/scripts/analysis/time_analysis.py", "file_name": "time_analysis.py", "file_ext": "py", "file_size_in_byte": 4723, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "gensim.models.wrappers.LdaMallet", "line_number": 13, "usage_type": "name"}, {"api_name": "gensim.models.wrappers.LdaMallet", "line_number": 18, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "attribute"}, {"api_name": "gensim.models.wrappers.LdaMallet", "line_number": 54, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 72, "usage_type": "call"}, {"api_name": "topics.save_topics_with_samples", "line_number": 85, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 89, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 108, "usage_type": "call"}, {"api_name": "gensim.models.wrappers.LdaMallet.load", "line_number": 110, "usage_type": "call"}, {"api_name": "gensim.models.wrappers.LdaMallet", "line_number": 110, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 113, "usage_type": "attribute"}, {"api_name": "lda_utils.get_docs_topics", "line_number": 115, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 174, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "172854122", "text": "#!/usr/bin/env python\nfrom datetime import *\nfrom traceback import *\nfrom connection import *\nfrom git_ls_remote import *\nfrom dbgit import *\n\ndef main():\n    sql = \"\"\"\nSELECT r.*\nFROM git_repo AS repo\nJOIN git_remote AS r ON r.root=repo.root\nLEFT join git_ls_remote AS ls ON ls.url=r.url\nWHERE ls IS NULL AND repo.pushed>=ls.checked\n\"\"\"\n    remotes = session.query(Remote).from_statement(sql).all()\n    urls = [r.url for r in remotes]  # a real list: it is consumed by in_() and iterated again below (map() would be exhausted on py3)\n    ls_remotes = session.query(LS_Remote).filter(LS_Remote.url.in_(urls)).all()\n    total = len(remotes)\n    for i,url in enumerate(urls,1):\n        try:\n            status = \"%s/%s %s\" % (i,total,url)\n            print(status.encode(\"utf-8\"))\n            _ls_remotes = [ls for ls in ls_remotes if ls.url==url and ls.ref==\"HEAD\"]\n            if _ls_remotes:\n                ls_remote = _ls_remotes[0]\n            else:\n                ls_remote = LS_Remote(url=url,ref=\"HEAD\")\n                
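# session and LS_Remote presumably come from the star imports above; add() makes the new row pending so the commit() at the end persists it\n                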
session.add(ls_remote)\n            ls_remote.checked = datetime.now()\n            commits = git_ls_remote([url,\"HEAD\"])\n            if commits:\n                sha,ref = commits[0]\n                ls_remote.ref = ref\n                ls_remote.sha = sha\n                ls_remote.exists = True\n            else:\n                sha = ref = None\n                if isconnected():\n                    ls_remote.exists = False\n        except Exception:\n            print(format_exc())\n    # update \"checked\"\n    session.commit()\n    session.flush()\n\nif __name__==\"__main__\":\n    main()\n", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 1528, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "datetime.now", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "445708488", "text": "__author__ = 'marnee'\nfrom jsonschema import validate\n\ncq_schema = {\n    \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n    \"id\": \"/\",\n    \"type\": \"object\",\n    \"properties\": {\n        \"cq\": {\n            \"id\": \"cq\",\n            \"type\": \"object\",\n            \"properties\": {\n                \"subject\": {\n                    \"id\": \"subject\",\n                    \"type\": \"string\"\n                },\n                \"message\": {\n                    \"id\": \"message\",\n                    \"type\": \"string\"\n                },\n                \"interests\": {\n                    \"id\": \"interests\",\n                    \"type\": \"array\",\n                    \"items\": [\n                        {\n                            \"id\": \"0\",\n                            \"type\": \"string\"\n                        },\n                        {\n                            \"id\": \"1\",\n                            \"type\": \"string\"\n                        }\n                    ]\n                }\n            },\n            \"required\": [\n                \"subject\",\n                \"message\"\n            ]\n        }\n    },\n    \"required\": [\n        \"cq\"\n    ]\n}\n\n\ndef validate_cq(cq_json):\n    try:\n        validate(cq_json, cq_schema)\n        return True\n    except Exception:\n        return False", "sub_path": "validators/validate_cq_schema.py", "file_name": "validate_cq_schema.py", "file_ext": "py", "file_size_in_byte": 957, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "jsonschema.validate", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "287566160", "text": "import csv\r\nimport requests\r\nfrom datetime import date\r\n\r\n\r\nclass TaobaoJsonSpider(object):\r\n    '''\r\n    Scrape Taobao products by keyword (first page only)\r\n    '''\r\n\r\n    def __init__(self):\r\n        self.run()\r\n\r\n    def run(self):\r\n        keyword = self.get_input()\r\n        result = self.get_source(keyword)\r\n        self.write_csv(keyword, result)\r\n\r\n    def get_input(self):\r\n        '''\r\n        Get the search keyword typed by the user\r\n        '''\r\n        return input('Enter a search keyword:\\n').strip()\r\n\r\n    def get_source(self, keyword):\r\n        '''\r\n        Send the search request and fetch the json response\r\n        '''\r\n\r\n        source = requests.get('https://s.taobao.com/search?q={}&ajax=true'.format(keyword)).json()\r\n\r\n        # grab all product entries on the current page\r\n        items = source['mods']['itemlist']['data']['auctions']\r\n        goods_list = []\r\n\r\n        for item in items:\r\n\r\n            goods = {\r\n                'name': item['raw_title'],\r\n                'link': item['detail_url'],\r\n                'price': item['view_price'],\r\n                'shop': item['nick'],\r\n                'location': item['item_loc'],\r\n                'deal-cnt': item['view_sales'][:-3]\r\n\r\n            }\r\n\r\n            goods_list.append(goods)\r\n\r\n            # print each product's info\r\n            print(goods)\r\n\r\n        return goods_list\r\n\r\n    def write_csv(self, keyword, goods_list):\r\n        '''\r\n        Generate the csv file\r\n        '''\r\n\r\n        with open(keyword + 'Taobao' + date.today().strftime('%Y-%m-%d') + '.csv', 'w', encoding='UTF-8') as f:\r\n            writer = csv.DictWriter(f, fieldnames=['name', 'price', 'shop', 'location', 'deal-cnt', 'link'])\r\n            writer.writeheader()\r\n            writer.writerows(goods_list)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    TaobaoJsonSpider()\r\n\r\n", "sub_path": "TaobaoJsonSpider.py", "file_name": "TaobaoJsonSpider.py", "file_ext": "py", "file_size_in_byte": 1720, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 60, 
"usage_type": "call"}, {"api_name": "datetime.date", "line_number": 60, "usage_type": "name"}, {"api_name": "csv.DictWriter", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "573198624", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"Manager and queryset for the redirects app.\"\"\"\n\nfrom django.db.models import Manager\nfrom django.db.models.query import QuerySet\n\n\nclass RedirectQuerySet(QuerySet):\n\n def get_redirect_path(self, path, language=None, version_slug=None):\n for redirect in self.select_related('project'):\n new_path = redirect.get_redirect_path(\n path=path,\n language=language,\n version_slug=version_slug,\n )\n if new_path:\n return new_path\n return None\n\n\nRedirectManager = Manager.from_queryset(RedirectQuerySet)\n", "sub_path": "readthedocs/redirects/managers.py", "file_name": "managers.py", "file_ext": "py", "file_size_in_byte": 630, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.db.models.query.QuerySet", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.Manager.from_queryset", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models.Manager", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "506983506", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\n@Time : 2020/4/17\n@License : (C)Copyright 2017-2019, Micro-Circle\n@Desc : None\n\"\"\"\nimport os\nimport sys\nsys.path.append(os.getcwd())\nprint(sys.path)\n\nfrom selenium import webdriver\nfrom time import sleep\nfrom util.read_ini import ReadIni\nfrom basic.find_element import FindElement\nimport random\nfrom PIL import Image\nfrom api import ShowapiRequest\n \nclass Register(object):\n def __init__(self, url):\n self.driver = self.get_driver(url=url)\n \n # 启动浏览器,打开目标测试页面url\n def get_driver(self, url):\n driver = webdriver.Chrome('./tools/chromedriver.exe')\n driver.get(url=url)\n driver.maximize_window()\n return driver\n \n # 定位用户信息,获取元素element\n def get_user_element(self, key):\n find_element = FindElement(self.driver)\n user_element = find_element.get_element(key=key)\n return user_element\n \n # 输入用户信息\n def send_user_info(self, key, data):\n self.get_user_element(key=key).send_keys(data)\n \n # 获取随机数\n def get_range(self):\n number = ''.join(random.sample('abcdefg123456', 8))\n return number\n \n # 获取验证码图片\n def get_captcha_image(self, file_name):\n self.driver.save_screenshot(filename=file_name)\n captcha_element = self.get_user_element('getcode_num')\n left = captcha_element.location['x']\n top = captcha_element.location['y']\n right = captcha_element.size['width'] + left\n height = captcha_element.size['height'] + top\n image = Image.open(file_name)\n img = image.crop((left, top, right, height))\n img.save(file_name)\n \n # 识别图片验证码\n def discern_captcha_image(self, file_name):\n self.get_captcha_image(file_name=file_name)\n # 解析验证码图片中的文字(用第三方的图片验证码识别接口 ShowApiRequest)\n r = ShowapiRequest(\"http://route.showapi.com/184-4\", \"48120\", \"12c017278c0845c2bcda177212d2d2ac\")\n r.addBodyPara(\"img_base64\", \"\")\n r.addBodyPara(\"typeId\", \"35\")\n r.addBodyPara(\"convert_to_jpg\", \"0\")\n r.addBodyPara(\"needMorePrecise\", \"0\")\n r.addFilePara(\"image\", file_name) # 文件上传时设置\n res = r.post()\n text = res.json()[\"showapi_res_body\"][\"Result\"]\n return 'text'\n \n # 主函数\n def main(self):\n register_nickname = self.get_range()\n register_email = self.get_range() + '@163.com'\n register_password = self.get_range() + '@123'\n file_name = 
'../image/code_image.png'\n captcha_code = self.discern_captcha_image(file_name=file_name)\n self.send_user_info('register_nickname', register_nickname)\n self.send_user_info('register_email', register_email)\n self.send_user_info('register_password', register_password)\n self.send_user_info('captcha_code', captcha_code)\n self.get_user_element('register-btn').click()\n sleep(5)\n self.driver.close()\n \n \nif __name__ == \"__main__\":\n register_url = 'https://account.aliyun.com/register/register.htm?oauth_callback=https%3A%2F%2Fwww.aliyun.com%2F%3Futm_content%3Dse_1009145079'\n r = Register(register_url)\n r.main()", "sub_path": "common_model/Register.py", "file_name": "Register.py", "file_ext": "py", "file_size_in_byte": 3290, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 27, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 27, "usage_type": "name"}, {"api_name": "basic.find_element.FindElement", "line_number": 34, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 55, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 55, "usage_type": "name"}, {"api_name": "api.ShowapiRequest", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "21407577", "text": "# -*- coding: utf-8 -*-\n\nfrom . 
import platform\nfrom .log import logger\nfrom .utils import binpath\nimport os\nimport subprocess\n\n\ndef run(*args, **kwargs):\n '''Returns True if successful, False if failure'''\n\n kwargs.setdefault('env', os.environ)\n kwargs.setdefault('shell', True)\n\n try:\n subprocess.check_call(' '.join(args), **kwargs)\n return True\n except subprocess.CalledProcessError:\n logger.debug('Error running: {}'.format(args))\n return False\n\n\ndef cmd():\n '''Return a command to launch a subshell'''\n\n if platform == 'win':\n return ['cmd.exe', '/K']\n\n elif platform == 'linux':\n ppid = os.getppid()\n ppid_cmdline_file = '/proc/{0}/cmdline'.format(ppid)\n try:\n with open(ppid_cmdline_file) as f:\n cmd = f.read()\n if cmd.endswith('\\x00'):\n cmd = cmd[:-1]\n cmd = cmd.split('\\x00')\n return cmd + [binpath('subshell.sh')]\n except:\n cmd = 'bash'\n\n else:\n cmd = 'bash'\n\n return [cmd, binpath('subshell.sh')]\n\n\ndef prompt(prefix=None, colored=True):\n '''Generate a prompt with a given prefix\n\n linux/osx: [prefix] user@host cwd $\n win: [prefix] cwd:\n '''\n\n if platform == 'win':\n return '[{0}] $P$G'.format(prefix)\n else:\n if colored:\n return (\n '[{0}] ' # White prefix\n '\\\\[\\\\033[01;32m\\\\]\\\\u@\\\\h\\\\[\\\\033[00m\\\\] ' # Green user@host\n '\\\\[\\\\033[01;34m\\\\]\\\\w $ \\\\[\\\\033[00m\\\\]' # Blue cwd $\n ).format(prefix)\n return '[{0}] \\\\u@\\\\h \\\\w $ '.format(prefix)\n\n\ndef launch(prompt_prefix=None):\n '''Launch a subshell'''\n\n if prompt_prefix:\n os.environ['PROMPT'] = prompt(prompt_prefix)\n\n subprocess.call(cmd(), env=os.environ.data)\n", "sub_path": "cpenv/shell.py", "file_name": "shell.py", "file_ext": "py", "file_size_in_byte": 1840, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "subprocess.check_call", "line_number": 17, "usage_type": "call"}, {"api_name": "subprocess.CalledProcessError", "line_number": 19, "usage_type": "attribute"}, {"api_name": "log.logger.debug", "line_number": 20, "usage_type": "call"}, {"api_name": "log.logger", "line_number": 20, "usage_type": "name"}, {"api_name": "os.getppid", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.binpath", "line_number": 39, "usage_type": "call"}, {"api_name": "utils.binpath", "line_number": 46, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 72, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 74, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 74, "usage_type": "attribute"}]} +{"seq_id": "432356855", "text": "import enum\n\nfrom wasm.typing import (\n UInt8,\n)\n\nfrom .bit_size import (\n BitSize,\n)\n\n\nclass ValType(enum.Enum):\n i32 = 'i32'\n i64 = 'i64'\n f32 = 'f32'\n f64 = 'f64'\n\n def __str__(self) -> str:\n return self.value\n\n def __repr__(self) -> str:\n return str(self)\n\n @classmethod\n def from_byte(cls, byte: UInt8) -> 'ValType':\n if byte == 0x7f:\n return cls.i32\n elif byte == 0x7e:\n return cls.i64\n elif byte == 0x7d:\n return cls.f32\n elif byte == 0x7c:\n return cls.f64\n else:\n raise ValueError(\n \"Provided byte does not map to a value type. Got \"\n f\"'{hex(byte)}'. 
Must be one of 0x7f|0x7e|0x7d|0x7c\"\n )\n\n @classmethod\n def from_str(cls, type_str: str) -> 'ValType':\n for type_ in cls:\n if type_.value == type_str:\n return type_\n else:\n raise ValueError(\n f\"No ValType match for provided type string: '{type_str}'\"\n )\n\n def to_byte(self) -> UInt8:\n if self is self.i32:\n return UInt8(0x7f)\n elif self is self.i64:\n return UInt8(0x7e)\n elif self is self.f32:\n return UInt8(0x7d)\n elif self is self.f64:\n return UInt8(0x7c)\n else:\n raise Exception(\"Invariant\")\n\n @property\n def is_integer_type(self) -> bool:\n return self in {self.i32, self.i64}\n\n @property\n def is_float_type(self) -> bool:\n return self in {self.f32, self.f64}\n\n @property\n def bit_size(self) -> BitSize:\n if self is self.i32:\n return BitSize.b32\n elif self is self.i64:\n return BitSize.b64\n elif self is self.f32:\n return BitSize.b32\n elif self is self.f64:\n return BitSize.b64\n else:\n raise Exception(\"Invariant\")\n\n @classmethod\n def get_float_type(cls, num_bits: BitSize) -> 'ValType':\n if num_bits is BitSize.b32:\n return cls.f32\n elif num_bits is BitSize.b64:\n return cls.f64\n else:\n raise ValueError(\n f\"Invalid bit size. Must be 32 or 64: Got {num_bits}\"\n )\n\n @classmethod\n def get_integer_type(cls, num_bits: BitSize) -> 'ValType':\n if num_bits == BitSize.b32:\n return cls.i32\n elif num_bits == BitSize.b64:\n return cls.i64\n else:\n raise ValueError(\n f\"Invalid bit size. Must be 32 or 64: Got {num_bits}\"\n )\n", "sub_path": "wasm/datatypes/valtype.py", "file_name": "valtype.py", "file_ext": "py", "file_size_in_byte": 2625, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "enum.Enum", "line_number": 12, "usage_type": "attribute"}, {"api_name": "wasm.typing.UInt8", "line_number": 25, "usage_type": "name"}, {"api_name": "wasm.typing.UInt8", "line_number": 52, "usage_type": "call"}, {"api_name": "wasm.typing.UInt8", "line_number": 54, "usage_type": "call"}, {"api_name": "wasm.typing.UInt8", "line_number": 56, "usage_type": "call"}, {"api_name": "wasm.typing.UInt8", "line_number": 58, "usage_type": "call"}, {"api_name": "wasm.typing.UInt8", "line_number": 50, "usage_type": "name"}, {"api_name": "bit_size.BitSize.b32", "line_number": 73, "usage_type": "attribute"}, {"api_name": "bit_size.BitSize", "line_number": 73, "usage_type": "name"}, {"api_name": "bit_size.BitSize.b64", "line_number": 75, "usage_type": "attribute"}, {"api_name": "bit_size.BitSize", "line_number": 75, "usage_type": "name"}, {"api_name": "bit_size.BitSize.b32", "line_number": 77, "usage_type": "attribute"}, {"api_name": "bit_size.BitSize", "line_number": 77, "usage_type": "name"}, {"api_name": "bit_size.BitSize.b64", "line_number": 79, "usage_type": "attribute"}, {"api_name": "bit_size.BitSize", "line_number": 79, "usage_type": "name"}, {"api_name": "bit_size.BitSize", "line_number": 71, "usage_type": "name"}, {"api_name": "bit_size.BitSize", "line_number": 84, "usage_type": "name"}, {"api_name": "bit_size.BitSize.b32", "line_number": 85, "usage_type": "attribute"}, {"api_name": "bit_size.BitSize", "line_number": 85, "usage_type": "name"}, {"api_name": "bit_size.BitSize.b64", "line_number": 87, "usage_type": "attribute"}, {"api_name": "bit_size.BitSize", "line_number": 87, "usage_type": "name"}, {"api_name": "bit_size.BitSize", "line_number": 95, "usage_type": "name"}, {"api_name": "bit_size.BitSize.b32", "line_number": 96, "usage_type": "attribute"}, {"api_name": "bit_size.BitSize", 
"line_number": 96, "usage_type": "name"}, {"api_name": "bit_size.BitSize.b64", "line_number": 98, "usage_type": "attribute"}, {"api_name": "bit_size.BitSize", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "156287309", "text": "\nfrom synapse.tests.common import SynTest\nfrom synapse.tools.dmon import getArgParser\n\nimport synapse.lib.cli as s_cli\n\nclass TestArgParser(SynTest):\n\n def test_getArgParser_logLevel(self):\n for level in ['debug', 'info', 'warning', 'error', 'critical']:\n p = getArgParser()\n args = p.parse_args(['--log-level', level])\n self.eq(args.log_level, level)\n\n def test_getArgParser_logLevel_exception(self):\n for level in ['all', 'notice']:\n with self.assertRaises(s_cli.CmdArgErr):\n p = getArgParser()\n p.parse_args(['--log-level', level])\n", "sub_path": "synapse/tests/test_dmon.py", "file_name": "test_dmon.py", "file_ext": "py", "file_size_in_byte": 629, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "synapse.tests.common.SynTest", "line_number": 7, "usage_type": "name"}, {"api_name": "synapse.tools.dmon.getArgParser", "line_number": 11, "usage_type": "call"}, {"api_name": "synapse.lib.cli.CmdArgErr", "line_number": 17, "usage_type": "attribute"}, {"api_name": "synapse.lib.cli", "line_number": 17, "usage_type": "name"}, {"api_name": "synapse.tools.dmon.getArgParser", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "579991669", "text": "#!/usr/bin/env python3\nimport db\nimport hashlib\nimport json\nimport logging\nimport math\nimport os\nimport re\nimport redis\nimport requests\nimport sys\nimport time\nimport lib\nimport urllib\nimport urllib.request\n\nr = redis.Redis(host='localhost', port=6379, db=0,charset=\"utf-8\", decode_responses=True)\n\nconfig = lib.get_config()\nworld_key = config.get('world') \nworld_key_ix = 0\nalpha_key = config.get('alpha')\n\nwith open('ticker-list.txt') as f:\n ticker_list = f.read().split('\\n')\n\nprint(\"\\n\".join(ticker_list))\nsys.exit(0)\n\nlast = time.time()\ndelay = math.ceil(12)\n\ndef seteasy(row): \n ix = 0\n special = {'RYAAY': 'RyanAir'}\n EXTRA = ',?( ?&? Co.,?| &| and| Technologies|),? (inc.?|p\\.?l\\.?c\\.?|Incorporated|Ltd\\.|N.V.|AG|Holdings|Group|US|ETF|Limited|Corporation|Group|Company|Consolidated|Aktiengesellschaft|Companies|Co.|S\\.?A\\.?|SE|\\(publ\\)|NV|A\\/S|Corp\\.|Series [A-E]|(Common|Plc) New|Registered|Industries|L\\.?P\\.?|Class [A-F]\\.?|\\([a-z]*\\)|AD[RS]|Subordinate|American Depositary Shares|Sponsored|Common Stock|Holding|Communications|International|Technologies)$'\n PRE = '^(The) '\n if row['ticker'] in special:\n row['easyname'] = special[row['ticker']]\n\n elif row['ticker'] in ['AIG', 'TIF', 'NWS', 'FTC']:\n row['easyname'] = re.sub(',? 
Inc.', '', row['name'], flags=re.IGNORECASE)\n\n else:\n row['easyname'] = re.sub('([a-z])([A-Z])', r'\\1­\\2', row['name'])\n while True:\n reductum = re.sub(PRE, '', \n re.sub(EXTRA, '', row['easyname'], flags=re.IGNORECASE), flags=re.IGNORECASE\n )\n if reductum == row['easyname']:\n break\n row['easyname'] = reductum\n ix += 1\n \n if row.get('industry'):\n row['industry'] = re.sub('REITS', 'Real Estate', row['industry'], flags=re.IGNORECASE)\n row['industry'] = re.sub('(^.+) - (.+)', r'\\2 \\1', row['industry'])\n row['industry'] = re.sub('(integrated|defensive|application)', '', row['industry'], flags=re.IGNORECASE)\n\n #print('{} {:30s} | {}'.format(ix, row['easyname'], row['name']))\n return row\n\ndef ticker2name(ticker):\n res = db.run('select * from stock where ticker=\"{}\"'.format(ticker), with_dict=True).fetchone()\n name = None\n\n if res is not None:\n name = res['name']\n\n if name is None:\n res = {'ticker': ticker}\n\n payload_raw = cache_get('https://financialmodelingprep.com/api/v3/company/profile/{}'.format(ticker)).strip()\n payload = json.loads(payload_raw)\n\n if payload_raw == '{ }':\n # First we accept our legacy redis\n res['name'] = r.hget('name', ticker)\n\n # if that doesn't work then we'll try to get it on the interwebs\n if not res['name']:\n global world_key_ix\n raw = cache_get('https://api.worldtradingdata.com/api/v1/stock?symbol={}'.format(ticker), append='&api_token={}'.format(world_key[world_key_ix]))\n world_key_ix = (world_key_ix + 1) % len(world_key)\n data = json.loads(raw)\n\n try:\n print(raw)\n res['name'] = data['data'][0]['name']\n\n except Exception as ex:\n print(ticker, data)\n\n else:\n res['raw'] = payload_raw\n for x in ['description', 'sector', 'industry']:\n res[x] = payload['profile'][x]\n\n res['name'] = payload['profile']['companyName']\n\n res = seteasy(res)\n db.insert('stock', res)\n\n # so by now we should have a \"res\" with the right info, I hope....\n if not res.get('easyname') or True:\n res = seteasy(res)\n update = {'easyname': res['easyname']}\n if res.get('industry'):\n update['industry'] = res['industry']\n db.update('stock', {'ticker': ticker}, res)\n \n return [ticker, res.get('easyname'), res.get('industry')]\n\nget_names = lambda nameList: [ ticker2name(x) for x in nameList ]\n\ndef cache_get(url, append = False, force = False, wait_until = False, cache_time = 60 * 60 * 24 * 30):\n fname = hashlib.md5(url.encode('utf-8')).hexdigest()\n cname = \"cache/{}\".format(fname)\n key = \"c:{}\".format(fname)\n\n if not r.exists(key) or force:\n if wait_until and wait_until - time.time() > 0:\n time.sleep(wait_until - time.time())\n\n if append:\n url += append\n\n req = urllib.request.Request(url)\n\n with urllib.request.urlopen(req) as response:\n r.set(key, '1', cache_time)\n with open(cname, 'w') as f:\n data = response.read().decode('utf-8')\n f.write(data)\n\n\n if not os.path.isfile(cname) or os.path.getsize(cname) == 0:\n data = r.get(key)\n if len(data) < 3:\n return cache_get(url, append = append, force = True, wait_until = wait_until, cache_time = cache_time)\n \n with open(cname, 'w') as f:\n f.write(r.get(key))\n\n r.set(key, '1')\n\n with open(cname, 'r') as f:\n res = f.read()\n return res\n \n\n\ndef historical(instrumentList = ['MSFT']):\n for instrument in instrumentList:\n try:\n data = my_trader.get_historical_quotes(instrument, 'day', 'week')\n except:\n login(force=True)\n return historical(instrumentList)\n\n duration = 60 * 24\n if data:\n for row in data['historicals']:\n db.insert('historical', {\n 
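# assumption: the worldtradingdata response may lack a usable 'data' entry for unknown symbols, so failures are caught and logged below\n                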
'ticker': instrument,\n 'open': row['open_price'],\n 'close': row['close_price'],\n 'low': row['low_price'],\n 'high': row['high_price'],\n 'begin': row['begins_at'],\n 'duration': duration\n })\n\n\n\nnodate = lambda what: [[x[0], x[1]] for x in db.run(what).fetchall()]\n\ndef get_dates(fields = '*'):\n end = \"and close > 0.1 group by ticker\"\n return {\n 'yesterday': nodate(f\"SELECT {fields},max(begin) FROM historical GROUP BY ticker ORDER BY begin DESC\"),\n 'week': nodate(f\"SELECT {fields},min(begin) FROM historical WHERE begin > strftime('%Y-%m-%d', 'now', '-7 day') group by ticker\"),\n 'month': nodate(f\"\"\"SELECT {fields},min(begin) FROM historical WHERE \n begin > strftime('%Y-%m-%d', 'now', '-1 month') and\n begin < strftime('%Y-%m-%d', 'now', '-21 day') {end}\"\"\"),\n 'year': nodate(f\"\"\"SELECT {fields},min(begin) FROM historical WHERE \n begin > strftime('%Y-%m-%d', 'now', '-1 year') and\n begin < strftime('%Y-%m-%d', 'now', '-11 month') {end}\"\"\"),\n 'decade': nodate(f\"\"\"SELECT {fields},min(begin) FROM historical WHERE \n begin > strftime('%Y-%m-%d', 'now', '-10 year') and\n begin < strftime('%Y-%m-%d', 'now', '-9 year') {end}\"\"\")\n }\n\n\ndef get_archive(stockList):\n global last\n ix = 0\n ttl = 3 * len(stockList)\n\n print(\"Gathering {} stocks\".format(len(stockList)))\n for name,duration in [('MONTHLY', 365.25/12), ('DAILY', 1), ('WEEKLY',7)]:\n duration *= (60 * 24) \n for stock in stockList:\n stock = stock.upper()\n print(\"{:6.3f} {} {} \".format(100 * ix / ttl, name, stock))\n\n force = False\n while True:\n ix += 1\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_{}_ADJUSTED&symbol={}\".format(name, stock)\n cache_time = max(60 * 60 * 24, duration / 2)\n resraw = cache_get(url, force = force, append = '&apikey={}'.format(alpha_key[ix % len(alpha_key)]), wait_until = last + delay, cache_time = cache_time)\n last = time.time()\n\n resjson = json.loads(resraw)\n if \"Note\" in resjson or 'Error Message' in resjson:\n force = True\n\n else:\n break\n\n for k,v in resjson.items():\n if k == 'Meta Data':\n continue\n\n for date,row in v.items():\n db.insert('historical', {\n 'ticker': stock,\n 'open': row['1. open'],\n 'high': row['2. high'],\n 'low': row['3. low'],\n 'close': row['4. 
close'],\n 'begin': date,\n 'duration': duration\n })\n", "sub_path": "ticker.py", "file_name": "ticker.py", "file_ext": "py", "file_size_in_byte": 7594, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "redis.Redis", "line_number": 17, "usage_type": "call"}, {"api_name": "lib.get_config", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 31, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 42, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 45, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 47, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 48, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 56, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 56, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 57, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 58, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 58, "usage_type": "attribute"}, {"api_name": "db.run", "line_number": 64, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 74, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 85, "usage_type": "call"}, {"api_name": "db.insert", "line_number": 102, "usage_type": "call"}, {"api_name": "db.update", "line_number": 110, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 117, "usage_type": "call"}, {"api_name": "time.time", "line_number": 122, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 123, "usage_type": "call"}, {"api_name": "time.time", "line_number": 123, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 128, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 128, "usage_type": "attribute"}, {"api_name": "urllib.request.urlopen", "line_number": 130, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 137, "usage_type": "call"}, {"api_name": "db.insert", "line_number": 164, "usage_type": "call"}, {"api_name": "db.run", "line_number": 176, "usage_type": "call"}, {"api_name": "time.time", "line_number": 213, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 215, "usage_type": "call"}, {"api_name": "db.insert", "line_number": 227, "usage_type": "call"}]} +{"seq_id": "512401731", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/1/11 上午11:16\n\nimport tornado.ioloop\nimport argparse\nimport os\nimport logger\nimport time\nimport signal\nimport logging\nfrom tornado.httpserver import HTTPServer\nfrom tornado.log import access_log\nfrom tornado.web import Application\nfrom jinjaloader import JinjaLoader\nfrom routes import get\n\n\ndef install_tornado_shutdown_handler(ioloop, server, logger=None):\n # see https://gist.github.com/mywaiting/4643396 for more detail\n if logger is None:\n import logging\n logger = logging\n\n def _sig_handler(sig, frame):\n logger.debug(\"Signal %s received. 
Preparing to stop server.\", sig)\n        ioloop.add_callback(shutdown)\n\n    def shutdown():\n        logger.debug(\"Stopping http server...\")\n        server.stop()\n        MAX_WAIT_SECONDS_BEFORE_SHUTDOWN = 3\n        logger.debug(\"Will shutdown in %s seconds\",\n                     MAX_WAIT_SECONDS_BEFORE_SHUTDOWN)\n        deadline = time.time() + MAX_WAIT_SECONDS_BEFORE_SHUTDOWN\n\n        def stop_loop():\n            now = time.time()\n            if now < deadline and (ioloop._callbacks or ioloop._timeouts):\n                ioloop.add_timeout(now + 1, stop_loop)\n                logger.debug(\"Waiting for callbacks and timeouts in IOLoop...\")\n            else:\n                ioloop.stop()\n                logger.info(\"Server is shutdown\")\n\n        stop_loop()\n\n    signal.signal(signal.SIGTERM, _sig_handler)\n    signal.signal(signal.SIGINT, _sig_handler)\n\n\nclass BusinessIssues(Application):\n\n    def log_request(self, handler):\n        if handler.request.uri == '/ping':\n            return\n        request_time = 1000.0 * handler.request.request_time()\n        extra = {'request_handler': handler}\n        log_method = access_log.info\n        if handler.get_status() < 400:\n            if 3000.0 < request_time <= 10000.0:\n                extra['type'] = 'slowreq'\n                log_method = access_log.warn\n            elif request_time > 10000.0:\n                extra['type'] = 'slowreq'\n                log_method = access_log.error\n        elif handler.get_status() < 500:\n            log_method = access_log.warn\n        else:\n            log_method = access_log.error\n\n        log_method(\"{}, {}, {}\".format(handler.get_status(), handler._request_summary(), request_time), extra=extra)\n\n\ndef main(args):\n    template_loader = JinjaLoader(os.path.join(os.path.dirname(__file__), 'templates/'))\n    application = BusinessIssues(get(),\n                                 template_loader=template_loader,\n                                 static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n                                 xsrf_cookies=False,\n                                 debug=args.debug)\n    server = HTTPServer(application, xheaders=True)\n    server.listen(args.port)\n    logger.setup()\n    install_tornado_shutdown_handler(tornado.ioloop.IOLoop.instance(), server, logging.getLogger())\n    logging.info('start service at ' + time.ctime() + '\\n')\n    tornado.ioloop.IOLoop.current().start()\n\n\nif __name__ == '__main__':\n    argp = argparse.ArgumentParser()\n    argp.add_argument('--port', required=True, type=int)\n    argp.add_argument('--debug', default=1, type=int)\n    args = argp.parse_args()\n    import sys\n    sys.exit(main(args))\n\n", "sub_path": "business_issues.py", "file_name": "business_issues.py", "file_ext": "py", "file_size_in_byte": 3290, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logger.debug", "line_number": 26, "usage_type": "call"}, {"api_name": "logger.debug", "line_number": 30, "usage_type": "call"}, {"api_name": "logger.debug", "line_number": 33, "usage_type": "call"}, {"api_name": "time.time", "line_number": 35, "usage_type": "call"}, {"api_name": "time.time", "line_number": 38, "usage_type": "call"}, {"api_name": "logger.debug", "line_number": 41, "usage_type": "call"}, {"api_name": "logger.info", "line_number": 44, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 48, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 48, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 49, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tornado.web.Application", "line_number": 52, "usage_type": "name"}, {"api_name": "tornado.log.access_log.info", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tornado.log.access_log", "line_number": 59, "usage_type": "name"}, {"api_name": "tornado.log.access_log.warn", "line_number": 63, 
"usage_type": "attribute"}, {"api_name": "tornado.log.access_log", "line_number": 63, "usage_type": "name"}, {"api_name": "tornado.log.access_log.error", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tornado.log.access_log", "line_number": 66, "usage_type": "name"}, {"api_name": "tornado.log.access_log.warn", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tornado.log.access_log", "line_number": 68, "usage_type": "name"}, {"api_name": "tornado.log.access_log.error", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tornado.log.access_log", "line_number": 70, "usage_type": "name"}, {"api_name": "jinjaloader.JinjaLoader", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 76, "usage_type": "call"}, {"api_name": "routes.get", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 79, "usage_type": "call"}, {"api_name": "tornado.httpserver.HTTPServer", "line_number": 82, "usage_type": "call"}, {"api_name": "logger.setup", "line_number": 84, "usage_type": "call"}, {"api_name": "tornado.ioloop.ioloop.IOLoop.instance", "line_number": 85, "usage_type": "call"}, {"api_name": "tornado.ioloop.ioloop", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 85, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 86, "usage_type": "call"}, {"api_name": "time.ctime", "line_number": 86, "usage_type": "call"}, {"api_name": "tornado.ioloop.ioloop.IOLoop.current", "line_number": 87, "usage_type": "call"}, {"api_name": "tornado.ioloop.ioloop", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 87, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 91, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "106393501", "text": "from part_four.models import IssuedToCCO, BallotBoxesIssuedToCCO\nfrom part_four.serializers import IssuedToCCOSerializer, BallotBoxesIssuedToCCOSerializer\nfrom rest_framework import viewsets\n# Create your views here.\n\n\nclass IssuedToCCOViewSet(viewsets.ModelViewSet):\n queryset = IssuedToCCO.objects.all()\n serializer_class = IssuedToCCOSerializer\n\n def get_queryset(self):\n election = self.kwargs['election']\n return IssuedToCCO.objects.filter(election__id=election)\n\n\nclass BallotBoxesIssuedToCCOViewSet(viewsets.ModelViewSet):\n queryset = BallotBoxesIssuedToCCO.objects.all()\n serializer_class = BallotBoxesIssuedToCCOSerializer\n\n def get_queryset(self):\n election = self.kwargs[\"election\"]\n return BallotBoxesIssuedToCCO.objects.filter(election__id=election)\n", "sub_path": "backend/part_four/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 809, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 7, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 7, "usage_type": "name"}, {"api_name": "part_four.models.IssuedToCCO.objects.all", "line_number": 8, 
"usage_type": "call"}, {"api_name": "part_four.models.IssuedToCCO.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "part_four.models.IssuedToCCO", "line_number": 8, "usage_type": "name"}, {"api_name": "part_four.serializers.IssuedToCCOSerializer", "line_number": 9, "usage_type": "name"}, {"api_name": "part_four.models.IssuedToCCO.objects.filter", "line_number": 13, "usage_type": "call"}, {"api_name": "part_four.models.IssuedToCCO.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "part_four.models.IssuedToCCO", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 16, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 16, "usage_type": "name"}, {"api_name": "part_four.models.BallotBoxesIssuedToCCO.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "part_four.models.BallotBoxesIssuedToCCO.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "part_four.models.BallotBoxesIssuedToCCO", "line_number": 17, "usage_type": "name"}, {"api_name": "part_four.serializers.BallotBoxesIssuedToCCOSerializer", "line_number": 18, "usage_type": "name"}, {"api_name": "part_four.models.BallotBoxesIssuedToCCO.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "part_four.models.BallotBoxesIssuedToCCO.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "part_four.models.BallotBoxesIssuedToCCO", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "205010505", "text": "#\n# Copyright (c) 2009-2019 Tom Keffer \n#\n# See the file LICENSE.txt for your full rights.\n#\n\n\"\"\"\nThis module performs two functions:\n1. Adds weather-related extensions to the WeeWX type system.\n2. Uses those extensions to augment packets and records with derived types.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\nfrom configobj import ConfigObj\n\nimport weedb\nimport weeutil.logger\nimport weeutil.weeutil\nimport weewx.engine\nimport weewx.manager\nimport weewx.units\nimport weewx.wxformulas\nimport weewx.xtypes\nfrom six.moves import StringIO\nfrom weeutil.weeutil import to_int, to_float, to_bool, TimeSpan\nfrom weewx.units import ValueTuple, mps_to_mph, kph_to_mph, METER_PER_FOOT, CtoF\n\nlog = logging.getLogger(__name__)\n\nDEFAULTS_INI = \"\"\"\n[StdWXCalculate]\n\n ignore_zero_wind = True # If windSpeed is zero, should windDir be set to None?\n rain_period = 900 # Rain rate window\n retain_period = 930 # How long to retain rain events. Should be >= rain_period + archive_delay\n et_period = 3600 # For evapotranspiration\n wind_height = 2.0 # For evapotranspiration. In meters.\n atc = 0.8 # For solar radiation RS\n nfac = 2 # Atmospheric turbidity (2=clear, 4-5=smoggy)\n max_delta_12h = 1800 # When looking up a temperature in the past, how close does the time have to be?\n data_binding = wx_binding\n\n [[Calculations]]\n # Order matters! 
Type 'pressure' must come before 'altimeter' and 'barometer'\n pressure = prefer_hardware\n altimeter = prefer_hardware\n appTemp = prefer_hardware\n barometer = prefer_hardware\n beaufort = prefer_hardware \n cloudbase = prefer_hardware\n dewpoint = prefer_hardware\n ET = prefer_hardware\n heatindex = prefer_hardware\n humidex = prefer_hardware\n inDewpoint = prefer_hardware\n maxSolarRad = prefer_hardware\n rainRate = prefer_hardware\n windchill = prefer_hardware\n windrun = prefer_hardware\n [[Algorithms]]\n altimeter = aaASOS\n maxSolarRad = RS\n\"\"\"\n\n\nclass StdWXCalculate(weewx.engine.StdService):\n \"\"\"Wrapper class to allow WXCalculate to be used as a WeeWX service\"\"\"\n\n def __init__(self, engine, config_dict):\n \"\"\"Initialize the service.\"\"\"\n super(StdWXCalculate, self).__init__(engine, config_dict)\n\n # Instantiate a WXCalculate object to do the heavy work\n self.calc = WXCalculate(config_dict,\n engine.stn_info.altitude_vt,\n engine.stn_info.latitude_f,\n engine.stn_info.longitude_f,\n engine.db_binder)\n\n # we will process both loop and archive events\n self.bind(weewx.NEW_LOOP_PACKET, self.new_loop_packet)\n self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)\n\n def new_loop_packet(self, event):\n\n self.calc.new_loop_packet(event.packet)\n\n # Now augment the packet with extended types as per the configuration\n self.calc.do_calculations(event.packet, 'loop')\n\n def new_archive_record(self, event):\n self.calc.do_calculations(event.record, 'archive')\n\n def shutDown(self):\n self.calc.shut_down()\n\n\nclass WXCalculate(object):\n \"\"\"This class has two jobs:\n\n - Add derived weather variables (such as dewpoint, heatindex, etc.) to the WeeWX extensible\n type system.\n - Use the type system to augment packets and records, following preferences specified in the\n configuration file.\n \"\"\"\n\n def __init__(self, config_dict, altitude_vt, latitude_f, longitude_f, db_binder=None):\n \"\"\"Initialize the service.\"\"\"\n\n # Start with the default configuration. 
Make a copy --- we will be modifying it\n        merge_dict = ConfigObj(StringIO(DEFAULTS_INI), encoding='utf-8')\n        # Now merge in the overrides from the config file\n        merge_dict.merge(config_dict)\n        # Extract out the part we're interested in\n        self.svc_dict = merge_dict['StdWXCalculate']\n\n        if db_binder is None:\n            db_binder = weewx.manager.DBBinder(config_dict)\n        self.db_manager = db_binder.get_manager(\n            data_binding=self.svc_dict.get('data_binding', 'wx_binding'),\n            initialize=True)\n\n        self.ignore_zero_wind = to_bool(self.svc_dict.get('ignore_zero_wind', True))\n\n        # Instantiate a PressureCooker to calculate various kinds of pressure\n        self.pressure_cooker = PressureCooker(altitude_vt,\n                                              to_int(self.svc_dict.get('max_delta_12h', 1800)),\n                                              self.svc_dict['Algorithms'].get('altimeter',\n                                                                              'aaASOS'))\n        # Instantiate a RainRater to calculate rainRate\n        self.rain_rater = RainRater(to_int(self.svc_dict.get('rain_period', 900)),\n                                    to_int(self.svc_dict.get('retain_period', 930)))\n\n        # Instantiate a WXXTypes object to calculate simple scalars (like dewpoint, etc.)\n        self.wx_types = WXXTypes(self.svc_dict,\n                                 altitude_vt,\n                                 latitude_f,\n                                 longitude_f)\n\n        # Now add all our type extensions into the type system\n        weewx.xtypes.xtypes.append(self.pressure_cooker)\n        weewx.xtypes.xtypes.append(self.rain_rater)\n        weewx.xtypes.xtypes.append(self.wx_types)\n\n        # Report about which values will be calculated...\n        log.info(\"The following values will be calculated: %s\",\n                 ', '.join([\"%s=%s\" % (k, self.svc_dict['Calculations'][k])\n                            for k in self.svc_dict['Calculations']]))\n        # ...and which algorithms will be used.\n        log.info(\"The following algorithms will be used for calculations: %s\",\n                 ', '.join([\"%s=%s\" % (k, self.svc_dict['Algorithms'][k])\n                            for k in self.svc_dict['Algorithms']]))\n\n    def new_loop_packet(self, loop_packet):\n        # Keep the RainRater up to date:\n        self.rain_rater.add_loop_packet(loop_packet, self.db_manager)\n\n    def do_calculations(self, data_dict, data_type):\n        \"\"\"Augment the data dictionary with derived types as necessary.\n\n        data_dict: The incoming LOOP packet or archive record.\n\n        data_type: = \"loop\" if LOOP packet;\n                   = \"archive\" if archive record.\n        \"\"\"\n        if self.ignore_zero_wind:\n            self.adjust_winddir(data_dict)\n\n        # Go through the list of potential calculations and see which ones need to be done\n        for obs in self.svc_dict['Calculations']:\n            directive = self.svc_dict['Calculations'][obs]\n            # Keys in svc_dict are in unicode. Keys in packets and records are in native strings.\n            # Just to keep things consistent, convert.\n            obs_type = str(obs)\n            if directive == 'software' or directive == 'prefer_hardware' \\\n                    and (obs_type not in data_dict or data_dict[obs_type] is None):\n                try:\n                    # We need to do a calculation for type 'obs_type'. 
This may raise an exception.\n new_value = weewx.xtypes.get_scalar(obs_type, data_dict, self.db_manager)\n except weewx.CannotCalculate:\n pass\n except weewx.UnknownType as e:\n log.debug(\"Unknown extensible type '%s'\" % e)\n except weewx.UnknownAggregation as e:\n log.debug(\"Unknown aggregation '%s'\" % e)\n else:\n # If there was no exception, add the results to the dictionary\n data_dict[obs_type] = new_value[0]\n\n @staticmethod\n def adjust_winddir(data):\n \"\"\"If windSpeed is in the data stream, and it is either zero or None, then the\n wind direction is undefined.\n \"\"\"\n if 'windSpeed' in data and not data['windSpeed']:\n data['windDir'] = None\n if 'windGust' in data and not data['windGust']:\n data['windGustDir'] = None\n\n def shut_down(self):\n for xtype in [self.pressure_cooker, self.rain_rater, self.wx_types]:\n # Give the object an opportunity to clean up\n xtype.shut_down()\n # Remove from the type system\n weewx.xtypes.xtypes.remove(xtype)\n self.db_manager = None\n\n\nclass WXXTypes(weewx.xtypes.XType):\n \"\"\"Weather extensions to the WeeWX type extension system that are relatively simple. This is\n for types which are generally stateless, such as dewpoint, heatindex, etc.\n \"\"\"\n\n def __init__(self, svc_dict, altitude_vt, latitude, longitude):\n \"\"\"Initialize an instance of WXXTypes\n\n Args:\n svc_dict: ConfigDict structure with configuration info\n altitude_vt: The altitude of the station as a ValueTuple\n latitude: Its latitude\n longitude: Its longitude\n \"\"\"\n\n self.svc_dict = svc_dict\n self.altitude_vt = altitude_vt\n self.latitude = latitude\n self.longitude = longitude\n\n # window of time for evapotranspiration calculation, in seconds\n self.et_period = to_int(svc_dict.get('et_period', 3600))\n # atmospheric transmission coefficient [0.7-0.91]\n self.atc = to_float(svc_dict.get('atc', 0.8))\n # Fail hard if out of range:\n if not 0.7 <= self.atc <= 0.91:\n raise weewx.ViolatedPrecondition(\"Atmospheric transmission \"\n \"coefficient (%f) out of \"\n \"range [.7-.91]\" % self.atc)\n # atmospheric turbidity (2=clear, 4-5=smoggy)\n self.nfac = to_float(svc_dict.get('nfac', 2))\n # height above ground at which wind is measured, in meters\n self.wind_height = to_float(svc_dict.get('wind_height', 2.0))\n\n def get_scalar(self, obs_type, record, db_manager):\n\n # Get the method name for this observation type\n method_name = 'calc_%s' % obs_type\n try:\n # Now call it with arguments\n return getattr(self, method_name)(obs_type, record, db_manager)\n except AttributeError:\n raise weewx.UnknownType(obs_type)\n\n def calc_maxSolarRad(self, key, data, db_manager):\n try:\n algo = self.svc_dict['Algorithms']['maxSolarRad'].lower()\n except KeyError:\n algo = 'rs'\n altitude_m = weewx.units.convert(self.altitude_vt, 'meter')[0]\n if algo == 'bras':\n val = weewx.wxformulas.solar_rad_Bras(self.latitude, self.longitude, altitude_m,\n data['dateTime'], self.nfac)\n elif algo == 'rs':\n val = weewx.wxformulas.solar_rad_RS(self.latitude, self.longitude, altitude_m,\n data['dateTime'], self.atc)\n else:\n raise weewx.ViolatedPrecondition(\"Unknown solar algorithm '%s'\"\n % self.svc_dict['Algorithms']['maxSolarRad'])\n return ValueTuple(val, 'watt_per_meter_squared', 'group_radiation')\n\n def calc_cloudbase(self, key, data, db_manager):\n if 'outTemp' not in data or 'outHumidity' not in data:\n raise weewx.CannotCalculate(key)\n # Convert altitude to the same unit system as the incoming record\n altitude = weewx.units.convertStd(self.altitude_vt, 
data['usUnits'])\n        # Use the appropriate formula\n        if data['usUnits'] == weewx.US:\n            formula = weewx.wxformulas.cloudbase_US\n            u = 'foot'\n        else:\n            formula = weewx.wxformulas.cloudbase_Metric\n            u = 'meter'\n        val = formula(data['outTemp'], data['outHumidity'], altitude[0])\n        return ValueTuple(val, u, 'group_altitude')\n\n    def calc_ET(self, key, data, db_manager):\n        \"\"\"Get maximum and minimum temperatures and average radiation and wind speed for the\n        indicated period then calculate the amount of evapotranspiration during the interval.\n        Convert to US units if necessary since this calculation operates in the US unit system.\n        \"\"\"\n\n        if 'interval' not in data:\n            # This will cause LOOP data not to be processed.\n            raise weewx.CannotCalculate(key)\n\n        interval = data['interval']\n        end_ts = data['dateTime']\n        start_ts = end_ts - self.et_period\n        try:\n            r = db_manager.getSql(\"SELECT MAX(outTemp), MIN(outTemp), \"\n                                  \"AVG(radiation), AVG(windSpeed), \"\n                                  \"MAX(outHumidity), MIN(outHumidity), \"\n                                  \"MAX(usUnits), MIN(usUnits) FROM %s \"\n                                  \"WHERE dateTime>? AND dateTime <=?\"\n                                  % db_manager.table_name, (start_ts, end_ts))\n        except weedb.DatabaseError:\n            return ValueTuple(None, None, None)\n\n        # Make sure everything is there:\n        if r is None or None in r:\n            return ValueTuple(None, None, None)\n\n        # Unpack the results. The SELECT order is MAX(usUnits), then MIN(usUnits).\n        T_max, T_min, rad_avg, wind_avg, rh_max, rh_min, std_unit_max, std_unit_min = r\n\n        # Check for mixed units\n        if std_unit_min != std_unit_max:\n            log.info(\"Mixed unit system not allowed in ET calculation. Skipped.\")\n            return ValueTuple(None, None, None)\n        std_unit = std_unit_min\n        if std_unit == weewx.METRIC or std_unit == weewx.METRICWX:\n            T_max = CtoF(T_max)\n            T_min = CtoF(T_min)\n            if std_unit == weewx.METRICWX:\n                wind_avg = mps_to_mph(wind_avg)\n            else:\n                wind_avg = kph_to_mph(wind_avg)\n        # Wind height is in meters, so convert it:\n        height_ft = self.wind_height / METER_PER_FOOT\n        # Get altitude in feet\n        altitude_ft = weewx.units.convert(self.altitude_vt, 'foot')[0]\n\n        try:\n            ET_rate = weewx.wxformulas.evapotranspiration_US(\n                T_min, T_max, rh_min, rh_max, rad_avg, wind_avg, height_ft,\n                self.latitude, self.longitude, altitude_ft, end_ts)\n        except ValueError as e:\n            log.error(\"Calculation of evapotranspiration failed: %s\", e)\n            weeutil.logger.log_traceback(log.error)\n            ET_inch = None\n        else:\n            # The formula returns inches/hour. We need the total ET over the interval, so multiply\n            # by the length of the interval in hours. 
Remember that 'interval' is actually in\n # minutes.\n ET_inch = ET_rate * interval / 60.0 if ET_rate is not None else None\n\n # Convert back to the unit system of the incoming record:\n ET = weewx.units.convertStd((ET_inch, 'inch', 'group_rain'), data['usUnits'])\n return ET\n\n @staticmethod\n def calc_dewpoint(key, data, db_manager=None):\n if 'outTemp' not in data or 'outHumidity' not in data:\n raise weewx.CannotCalculate(key)\n if data['usUnits'] == weewx.US:\n val = weewx.wxformulas.dewpointF(data['outTemp'], data['outHumidity'])\n u = 'degree_F'\n else:\n val = weewx.wxformulas.dewpointC(data['outTemp'], data['outHumidity'])\n u = 'degree_C'\n return weewx.units.convertStd((val, u, 'group_temperature'), data['usUnits'])\n\n @staticmethod\n def calc_inDewpoint(key, data, db_manager=None):\n if 'inTemp' not in data or 'inHumidity' not in data:\n raise weewx.CannotCalculate(key)\n if data['usUnits'] == weewx.US:\n val = weewx.wxformulas.dewpointF(data['inTemp'], data['inHumidity'])\n u = 'degree_F'\n else:\n val = weewx.wxformulas.dewpointC(data['inTemp'], data['inHumidity'])\n u = 'degree_C'\n return weewx.units.convertStd((val, u, 'group_temperature'), data['usUnits'])\n\n @staticmethod\n def calc_windchill(key, data, db_manager=None):\n if 'outTemp' not in data or 'windSpeed' not in data:\n raise weewx.CannotCalculate(key)\n if data['usUnits'] == weewx.US:\n val = weewx.wxformulas.windchillF(data['outTemp'], data['windSpeed'])\n u = 'degree_F'\n else:\n val = weewx.wxformulas.windchillC(data['outTemp'], data['windSpeed'])\n u = 'degree_C'\n return weewx.units.convertStd((val, u, 'group_temperature'), data['usUnits'])\n\n @staticmethod\n def calc_heatindex(key, data, db_manager=None):\n if 'outTemp' not in data or 'outHumidity' not in data:\n raise weewx.CannotCalculate(key)\n if data['usUnits'] == weewx.US:\n val = weewx.wxformulas.heatindexF(data['outTemp'], data['outHumidity'])\n u = 'degree_F'\n else:\n val = weewx.wxformulas.heatindexC(data['outTemp'], data['outHumidity'])\n u = 'degree_C'\n return weewx.units.convertStd((val, u, 'group_temperature'), data['usUnits'])\n\n @staticmethod\n def calc_humidex(key, data, db_manager=None):\n if 'outTemp' not in data or 'outHumidity' not in data:\n raise weewx.CannotCalculate(key)\n if data['usUnits'] == weewx.US:\n val = weewx.wxformulas.humidexF(data['outTemp'], data['outHumidity'])\n u = 'degree_F'\n else:\n val = weewx.wxformulas.humidexC(data['outTemp'], data['outHumidity'])\n u = 'degree_C'\n return weewx.units.convertStd((val, u, 'group_temperature'), data['usUnits'])\n\n @staticmethod\n def calc_appTemp(key, data, db_manager=None):\n if 'outTemp' not in data or 'outHumidity' not in data or 'windSpeed' not in data:\n raise weewx.CannotCalculate(key)\n if data['usUnits'] == weewx.US:\n val = weewx.wxformulas.apptempF(data['outTemp'], data['outHumidity'],\n data['windSpeed'])\n u = 'degree_F'\n else:\n # The metric equivalent needs wind speed in mps. 
Convert.\n            windspeed_vt = weewx.units.as_value_tuple(data, 'windSpeed')\n            windspeed_mps = weewx.units.convert(windspeed_vt, 'meter_per_second')[0]\n            val = weewx.wxformulas.apptempC(data['outTemp'], data['outHumidity'], windspeed_mps)\n            u = 'degree_C'\n        return weewx.units.convertStd((val, u, 'group_temperature'), data['usUnits'])\n\n    @staticmethod\n    def calc_beaufort(key, data, db_manager=None):\n        if 'windSpeed' not in data:\n            raise weewx.CannotCalculate\n        windspeed_vt = weewx.units.as_value_tuple(data, 'windSpeed')\n        windspeed_kn = weewx.units.convert(windspeed_vt, 'knot')[0]\n        return ValueTuple(weewx.wxformulas.beaufort(windspeed_kn), None, None)\n\n    @staticmethod\n    def calc_windrun(key, data, db_manager=None):\n        \"\"\"Calculate wind run. Requires key 'interval'\"\"\"\n        if 'windSpeed' not in data or 'interval' not in data:\n            raise weewx.CannotCalculate(key)\n\n        if data['windSpeed'] is not None:\n            if data['usUnits'] == weewx.US:\n                val = data['windSpeed'] * data['interval'] / 60.0\n                u = 'mile'\n            elif data['usUnits'] == weewx.METRIC:\n                val = data['windSpeed'] * data['interval'] / 60.0\n                u = 'km'\n            elif data['usUnits'] == weewx.METRICWX:\n                val = data['windSpeed'] * data['interval'] * 60.0 / 1000.0\n                u = 'km'\n            else:\n                raise weewx.ViolatedPrecondition(\"Unknown unit system %s\" % data['usUnits'])\n        else:\n            val = None\n            u = 'mile'\n        return weewx.units.convertStd((val, u, 'group_distance'), data['usUnits'])\n\n\nclass PressureCooker(weewx.xtypes.XType):\n    \"\"\"Pressure related extensions to the WeeWX type system. \"\"\"\n\n    def __init__(self, altitude_vt, max_ts_delta=1800, altimeter_algorithm='aaNOAA'):\n        \"\"\"Initialize the PressureCooker.\n\n        altitude_vt: The altitude as a ValueTuple\n\n        max_ts_delta: When looking up a temperature in the past,\n        how close does the time have to be?\n\n        altimeter_algorithm: Algorithm to use to calculate altimeter.\n        \"\"\"\n        self.altitude_vt = altitude_vt\n        self.max_ts_delta = max_ts_delta\n        if not altimeter_algorithm.startswith('aa'):\n            altimeter_algorithm = 'aa%s' % altimeter_algorithm\n        self.altimeter_algorithm = altimeter_algorithm\n\n        # Timestamp (roughly) 12 hours ago\n        self.ts_12h = None\n        # Temperature 12 hours ago as a ValueTuple\n        self.temp_12h_vt = None\n\n    def _get_temperature_12h(self, ts, dbmanager):\n        \"\"\"Get the temperature as a ValueTuple from 12 hours ago. The value will\n        be None if no temperature is available.\n        \"\"\"\n\n        ts_12h = ts - 12 * 3600\n\n        # Look up the temperature 12h ago if this is the first time through,\n        # or we don't have a usable temperature, or the old temperature is too stale.\n        if self.ts_12h is None \\\n                or self.temp_12h_vt is None \\\n                or abs(self.ts_12h - ts_12h) > self.max_ts_delta:\n            # Hit the database to get a newer temperature.\n            record = dbmanager.getRecord(ts_12h, max_delta=self.max_ts_delta)\n            if record and 'outTemp' in record:\n                # Figure out what unit the record is in ...\n                unit = weewx.units.getStandardUnitType(record['usUnits'], 'outTemp')\n                # ... 
then form a ValueTuple.\n self.temp_12h_vt = weewx.units.ValueTuple(record['outTemp'], *unit)\n else:\n # Invalidate the temperature ValueTuple from 12h ago\n self.temp_12h_vt = None\n # Save the timestamp\n self.ts_12h = ts_12h\n\n return self.temp_12h_vt\n\n def get_scalar(self, key, record, dbmanager):\n if key == 'pressure':\n return self.pressure(record, dbmanager)\n elif key == 'altimeter':\n return self.altimeter(record)\n elif key == 'barometer':\n return self.barometer(record)\n else:\n raise weewx.UnknownType(key)\n\n def pressure(self, record, dbmanager):\n \"\"\"Calculate the observation type 'pressure'.\"\"\"\n\n # All of the following keys are required:\n if any(key not in record for key in ['usUnits', 'outTemp', 'barometer', 'outHumidity']):\n raise weewx.CannotCalculate('pressure')\n\n # Get the temperature in Fahrenheit from 12 hours ago\n temp_12h_vt = self._get_temperature_12h(record['dateTime'], dbmanager)\n if temp_12h_vt is None \\\n or temp_12h_vt[0] is None \\\n or record['outTemp'] is None \\\n or record['barometer'] is None \\\n or record['outHumidity'] is None:\n pressure = None\n else:\n # The following requires everything to be in US Customary units.\n # Rather than convert the whole record, just convert what we need:\n record_US = weewx.units.to_US({'usUnits': record['usUnits'],\n 'outTemp': record['outTemp'],\n 'barometer': record['barometer'],\n 'outHumidity': record['outHumidity']})\n # Get the altitude in feet\n altitude_ft = weewx.units.convert(self.altitude_vt, \"foot\")\n # The outside temperature in F.\n temp_12h_F = weewx.units.convert(temp_12h_vt, \"degree_F\")\n pressure = weewx.uwxutils.uWxUtilsVP.SeaLevelToSensorPressure_12(\n record_US['barometer'],\n altitude_ft[0],\n record_US['outTemp'],\n temp_12h_F[0],\n record_US['outHumidity']\n )\n\n # Convert to target unit system and return\n return weewx.units.convertStd((pressure, 'inHg', 'group_pressure'), record['usUnits'])\n\n def altimeter(self, record):\n \"\"\"Calculate the observation type 'altimeter'.\"\"\"\n if 'pressure' not in record:\n raise weewx.CannotCalculate('altimeter')\n\n # Convert altitude to same unit system of the incoming record\n altitude = weewx.units.convertStd(self.altitude_vt, record['usUnits'])\n\n # Figure out which altimeter formula to use, and what unit the results will be in:\n if record['usUnits'] == weewx.US:\n formula = weewx.wxformulas.altimeter_pressure_US\n u = 'inHg'\n else:\n formula = weewx.wxformulas.altimeter_pressure_Metric\n u = 'mbar'\n # Apply the formula\n altimeter = formula(record['pressure'], altitude[0], self.altimeter_algorithm)\n # Convert to the target unit system\n return weewx.units.convertStd((altimeter, u, 'group_pressure'), record['usUnits'])\n\n def barometer(self, record):\n \"\"\"Calculate the observation type 'barometer'\"\"\"\n\n if 'pressure' not in record or 'outTemp' not in record:\n raise weewx.CannotCalculate('barometer')\n\n # Convert altitude to same unit system of the incoming record\n altitude = weewx.units.convertStd(self.altitude_vt, record['usUnits'])\n\n # Figure out what barometer formula to use:\n if record['usUnits'] == weewx.US:\n formula = weewx.wxformulas.sealevel_pressure_US\n u = 'inHg'\n else:\n formula = weewx.wxformulas.sealevel_pressure_Metric\n u = 'mbar'\n # Apply the formula\n barometer = formula(record['pressure'], altitude[0], record['outTemp'])\n # Convert to the target unit system:\n return weewx.units.convertStd((barometer, u, 'group_pressure'), record['usUnits'])\n\n\nclass 
RainRater(weewx.xtypes.XType):\n \"\"\"\"An extension to the WeeWX type system for calculating rainRate\"\"\"\n\n def __init__(self, rain_period, retain_period):\n \"\"\"Initialize the RainRater.\n\n Args:\n rain_period: The length of the sliding window in seconds.\n retain_period: How long to retain a rain event. Should be rain_period\n plus archive_delay.\n \"\"\"\n self.rain_period = rain_period\n self.retain_period = retain_period\n self.rain_events = None\n self.unit_system = None\n\n def add_loop_packet(self, record, db_manager):\n # Was there any rain? If so, convert the rain to the unit system we are using,\n # then intern it\n if 'rain' in record and record['rain']:\n if self.unit_system is None:\n # Adopt the unit system of the first record.\n self.unit_system = record['usUnits']\n if self.rain_events is None:\n self._setup(record['dateTime'], db_manager)\n # Get the unit system and group of the incoming rain. In theory, this should be\n # the same as self.unit_system, but ...\n u, g = weewx.units.getStandardUnitType(record['usUnits'], 'rain')\n # Convert to the unit system that we are using\n rain = weewx.units.convertStd((record['rain'], u, g), self.unit_system)[0]\n # Add it to the list of rain events\n self.rain_events.append((record['dateTime'], rain))\n\n if self.rain_events:\n # Trim any old packets:\n self.rain_events = [x for x in self.rain_events\n if x[0] >= record['dateTime'] - self.rain_period]\n\n def get_scalar(self, key, record, db_manager):\n \"\"\"Calculate the rainRate\"\"\"\n if key != 'rainRate':\n raise weewx.UnknownType(key)\n\n if self.rain_events is None:\n self._setup(record['dateTime'], db_manager)\n\n # Sum the rain events within the time window...\n rainsum = sum(x[1] for x in self.rain_events\n if x[0] > record['dateTime'] - self.rain_period)\n # ...then divide by the period and scale to an hour\n val = 3600 * rainsum / self.rain_period\n # Get the unit and unit group for rainRate\n u, g = weewx.units.getStandardUnitType(self.unit_system, 'rainRate')\n # Form a ValueTuple, then convert it to the unit system of the incoming record\n rr = weewx.units.convertStd(ValueTuple(val, u, g), record['usUnits'])\n return rr\n\n def _setup(self, stop_ts, db_manager):\n \"\"\"Initialize the rain event list\"\"\"\n if self.rain_events is None:\n self.rain_events = []\n start_ts = stop_ts - self.retain_period\n # Get all rain events since the window start from the database\n for row in db_manager.genSql(\"SELECT dateTime, usUnits, rain FROM %s \"\n \"WHERE dateTime>? 
AND dateTime<=?;\"\n % db_manager.table_name, (start_ts, stop_ts)):\n # Unpack the row:\n time_ts, unit_system, rain = row\n self.add_loop_packet({'dateTime': time_ts, 'usUnits': unit_system, 'rain': rain},\n db_manager)\n", "sub_path": "dist/weewx-4.0.0b12/bin/weewx/wxservices.py", "file_name": "wxservices.py", "file_ext": "py", "file_size_in_byte": 29288, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 31, "usage_type": "call"}, {"api_name": "weewx.engine.engine", "line_number": 69, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 69, "usage_type": "name"}, {"api_name": "weewx.engine.NEW_LOOP_PACKET", "line_number": 84, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 84, "usage_type": "name"}, {"api_name": "weewx.engine.NEW_ARCHIVE_RECORD", "line_number": 85, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 85, "usage_type": "name"}, {"api_name": "configobj.ConfigObj", "line_number": 114, "usage_type": "call"}, {"api_name": "six.moves.StringIO", "line_number": 114, "usage_type": "call"}, {"api_name": "weewx.engine.manager.DBBinder", "line_number": 121, "usage_type": "call"}, {"api_name": "weewx.engine.manager", "line_number": 121, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 121, "usage_type": "name"}, {"api_name": "weeutil.weeutil.to_bool", "line_number": 126, "usage_type": "call"}, {"api_name": "weeutil.weeutil.to_int", "line_number": 130, "usage_type": "call"}, {"api_name": "weeutil.weeutil.to_int", "line_number": 134, "usage_type": "call"}, {"api_name": "weeutil.weeutil.to_int", "line_number": 135, "usage_type": "call"}, {"api_name": "weewx.engine.xtypes.xtypes.append", "line_number": 144, "usage_type": "call"}, {"api_name": "weewx.engine.xtypes", "line_number": 144, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 144, "usage_type": "name"}, {"api_name": "weewx.engine.xtypes.xtypes.append", "line_number": 145, "usage_type": "call"}, {"api_name": "weewx.engine.xtypes", "line_number": 145, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 145, "usage_type": "name"}, {"api_name": "weewx.engine.xtypes.xtypes.append", "line_number": 146, "usage_type": "call"}, {"api_name": "weewx.engine.xtypes", "line_number": 146, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 146, "usage_type": "name"}, {"api_name": "weewx.engine.xtypes.get_scalar", "line_number": 182, "usage_type": "call"}, {"api_name": "weewx.engine.xtypes", "line_number": 182, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 182, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 183, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 183, "usage_type": "name"}, {"api_name": "weewx.engine.UnknownType", "line_number": 185, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 185, "usage_type": "name"}, {"api_name": "weewx.engine.UnknownAggregation", "line_number": 187, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 187, "usage_type": "name"}, {"api_name": "weewx.engine.xtypes.xtypes.remove", "line_number": 208, "usage_type": "call"}, {"api_name": "weewx.engine.xtypes", "line_number": 208, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 208, "usage_type": "name"}, {"api_name": "weewx.engine.xtypes", "line_number": 212, 
"usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 212, "usage_type": "name"}, {"api_name": "weeutil.weeutil.to_int", "line_number": 233, "usage_type": "call"}, {"api_name": "weeutil.weeutil.to_float", "line_number": 235, "usage_type": "call"}, {"api_name": "weewx.engine.ViolatedPrecondition", "line_number": 238, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 238, "usage_type": "name"}, {"api_name": "weeutil.weeutil.to_float", "line_number": 242, "usage_type": "call"}, {"api_name": "weeutil.weeutil.to_float", "line_number": 244, "usage_type": "call"}, {"api_name": "weewx.engine.UnknownType", "line_number": 254, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 254, "usage_type": "name"}, {"api_name": "weewx.engine.units.convert", "line_number": 261, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 261, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 261, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.solar_rad_Bras", "line_number": 263, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 263, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 263, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.solar_rad_RS", "line_number": 266, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 266, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 266, "usage_type": "name"}, {"api_name": "weewx.engine.ViolatedPrecondition", "line_number": 269, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 269, "usage_type": "name"}, {"api_name": "weewx.units.ValueTuple", "line_number": 271, "usage_type": "call"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 275, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 275, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 277, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 277, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 277, "usage_type": "name"}, {"api_name": "weewx.engine.US", "line_number": 279, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 279, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas", "line_number": 280, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 280, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas", "line_number": 283, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 283, "usage_type": "name"}, {"api_name": "weewx.units.ValueTuple", "line_number": 286, "usage_type": "call"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 296, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 296, "usage_type": "name"}, {"api_name": "weedb.DatabaseError", "line_number": 308, "usage_type": "attribute"}, {"api_name": "weewx.units.ValueTuple", "line_number": 309, "usage_type": "call"}, {"api_name": "weewx.units.ValueTuple", "line_number": 313, "usage_type": "call"}, {"api_name": "weewx.units.ValueTuple", "line_number": 321, "usage_type": "call"}, {"api_name": "weewx.engine.METRIC", "line_number": 323, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 323, "usage_type": "name"}, {"api_name": "weewx.engine.METRICWX", "line_number": 323, "usage_type": "attribute"}, {"api_name": "weewx.units.CtoF", "line_number": 324, "usage_type": "call"}, {"api_name": 
"weewx.units.CtoF", "line_number": 325, "usage_type": "call"}, {"api_name": "weewx.engine.METRICWX", "line_number": 326, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 326, "usage_type": "name"}, {"api_name": "weewx.units.mps_to_mph", "line_number": 327, "usage_type": "call"}, {"api_name": "weewx.units.kph_to_mph", "line_number": 329, "usage_type": "call"}, {"api_name": "weewx.units.METER_PER_FOOT", "line_number": 331, "usage_type": "name"}, {"api_name": "weewx.engine.units.convert", "line_number": 333, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 333, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 333, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.evapotranspiration_US", "line_number": 336, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 336, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 336, "usage_type": "name"}, {"api_name": "weeutil.logger.logger.log_traceback", "line_number": 341, "usage_type": "call"}, {"api_name": "weeutil.logger.logger", "line_number": 341, "usage_type": "attribute"}, {"api_name": "weeutil.logger", "line_number": 341, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 350, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 350, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 350, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 356, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 356, "usage_type": "name"}, {"api_name": "weewx.engine.US", "line_number": 357, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 357, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.dewpointF", "line_number": 358, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 358, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 358, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.dewpointC", "line_number": 361, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 361, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 361, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 363, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 363, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 363, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 368, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 368, "usage_type": "name"}, {"api_name": "weewx.engine.US", "line_number": 369, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 369, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.dewpointF", "line_number": 370, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 370, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 370, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.dewpointC", "line_number": 373, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 373, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 373, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 375, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 375, "usage_type": "attribute"}, {"api_name": 
"weewx.engine", "line_number": 375, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 380, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 380, "usage_type": "name"}, {"api_name": "weewx.engine.US", "line_number": 381, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 381, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.windchillF", "line_number": 382, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 382, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 382, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.windchillC", "line_number": 385, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 385, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 385, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 387, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 387, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 387, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 392, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 392, "usage_type": "name"}, {"api_name": "weewx.engine.US", "line_number": 393, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 393, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.heatindexF", "line_number": 394, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 394, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 394, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.heatindexC", "line_number": 397, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 397, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 397, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 399, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 399, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 399, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 404, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 404, "usage_type": "name"}, {"api_name": "weewx.engine.US", "line_number": 405, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 405, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.humidexF", "line_number": 406, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 406, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 406, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.humidexC", "line_number": 409, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 409, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 409, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 411, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 411, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 411, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 416, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 416, "usage_type": "name"}, {"api_name": "weewx.engine.US", "line_number": 417, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 417, "usage_type": 
"name"}, {"api_name": "weewx.engine.wxformulas.apptempF", "line_number": 418, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 418, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 418, "usage_type": "name"}, {"api_name": "weewx.engine.units.as_value_tuple", "line_number": 423, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 423, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 423, "usage_type": "name"}, {"api_name": "weewx.engine.units.convert", "line_number": 424, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 424, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 424, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas.apptempC", "line_number": 425, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 425, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 425, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 427, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 427, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 427, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 432, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 432, "usage_type": "name"}, {"api_name": "weewx.engine.units.as_value_tuple", "line_number": 433, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 433, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 433, "usage_type": "name"}, {"api_name": "weewx.engine.units.convert", "line_number": 434, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 434, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 434, "usage_type": "name"}, {"api_name": "weewx.units.ValueTuple", "line_number": 435, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas.beaufort", "line_number": 435, "usage_type": "call"}, {"api_name": "weewx.engine.wxformulas", "line_number": 435, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 435, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 441, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 441, "usage_type": "name"}, {"api_name": "weewx.engine.US", "line_number": 444, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 444, "usage_type": "name"}, {"api_name": "weewx.engine.METRIC", "line_number": 447, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 447, "usage_type": "name"}, {"api_name": "weewx.engine.METRICWX", "line_number": 450, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 450, "usage_type": "name"}, {"api_name": "weewx.engine.ViolatedPrecondition", "line_number": 454, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 454, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 458, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 458, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 458, "usage_type": "name"}, {"api_name": "weewx.engine.xtypes", "line_number": 461, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 461, "usage_type": "name"}, {"api_name": "weewx.engine.units.getStandardUnitType", "line_number": 501, "usage_type": "call"}, {"api_name": 
"weewx.engine.units", "line_number": 501, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 501, "usage_type": "name"}, {"api_name": "weewx.engine.units.ValueTuple", "line_number": 503, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 503, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 503, "usage_type": "name"}, {"api_name": "weewx.engine.UnknownType", "line_number": 520, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 520, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 527, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 527, "usage_type": "name"}, {"api_name": "weewx.engine.units.to_US", "line_number": 540, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 540, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 540, "usage_type": "name"}, {"api_name": "weewx.engine.units.convert", "line_number": 545, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 545, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 545, "usage_type": "name"}, {"api_name": "weewx.engine.units.convert", "line_number": 547, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 547, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 547, "usage_type": "name"}, {"api_name": "weewx.engine.uwxutils.uWxUtilsVP.SeaLevelToSensorPressure_12", "line_number": 548, "usage_type": "call"}, {"api_name": "weewx.engine.uwxutils", "line_number": 548, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 548, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 557, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 557, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 557, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 562, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 562, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 565, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 565, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 565, "usage_type": "name"}, {"api_name": "weewx.engine.US", "line_number": 568, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 568, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas", "line_number": 569, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 569, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas", "line_number": 572, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 572, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 577, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 577, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 577, "usage_type": "name"}, {"api_name": "weewx.engine.CannotCalculate", "line_number": 583, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 583, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 586, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 586, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 586, "usage_type": "name"}, {"api_name": "weewx.engine.US", "line_number": 589, "usage_type": "attribute"}, 
{"api_name": "weewx.engine", "line_number": 589, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas", "line_number": 590, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 590, "usage_type": "name"}, {"api_name": "weewx.engine.wxformulas", "line_number": 593, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 593, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 598, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 598, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 598, "usage_type": "name"}, {"api_name": "weewx.engine.xtypes", "line_number": 601, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 601, "usage_type": "name"}, {"api_name": "weewx.engine.units.getStandardUnitType", "line_number": 628, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 628, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 628, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 630, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 630, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 630, "usage_type": "name"}, {"api_name": "weewx.engine.UnknownType", "line_number": 642, "usage_type": "call"}, {"api_name": "weewx.engine", "line_number": 642, "usage_type": "name"}, {"api_name": "weewx.engine.units.getStandardUnitType", "line_number": 653, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 653, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 653, "usage_type": "name"}, {"api_name": "weewx.engine.units.convertStd", "line_number": 655, "usage_type": "call"}, {"api_name": "weewx.engine.units", "line_number": 655, "usage_type": "attribute"}, {"api_name": "weewx.engine", "line_number": 655, "usage_type": "name"}, {"api_name": "weewx.units.ValueTuple", "line_number": 655, "usage_type": "call"}]} +{"seq_id": "353202686", "text": "import datetime\nimport io\nimport os\nimport time\nimport json\nimport pytest\nimport threading\n\nimport wandb.run_manager\nfrom wandb.apis import internal\nimport wandb\nfrom wandb import wandb_socket\nfrom wandb.wandb_run import Run, RESUME_FNAME\nfrom wandb.run_manager import FileEventHandlerThrottledOverwriteMinWait, FileEventHandlerOverwriteDeferred, FileEventHandlerOverwrite, FileEventHandlerOverwriteOnce\nfrom click.testing import CliRunner\n\n\ndef test_check_update_available_equal(request_mocker, capsys, query_viewer):\n \"Test update availability in different cases.\"\n test_cases = [\n ('0.8.10', '0.8.10', False),\n ('0.8.9', '0.8.10', True),\n ('0.8.11', '0.8.10', False),\n ('1.0.0', '2.0.0', True),\n ('0.4.5', '0.4.5a5', False),\n ('0.4.5', '0.4.3b2', False),\n ('0.4.5', '0.4.6b2', True),\n ('0.4.5.alpha', '0.4.4', False),\n ('0.4.5.alpha', '0.4.5', True),\n ('0.4.5.alpha', '0.4.6', True)\n ]\n\n for current, latest, is_expected in test_cases:\n with CliRunner().isolated_filesystem():\n query_viewer(request_mocker)\n is_avail = _is_update_avail(\n request_mocker, capsys, current, latest)\n assert is_avail == is_expected, \"expected {} compared to {} to yield update availability of {}\".format(\n current, latest, is_expected)\n\n\ndef _is_update_avail(request_mocker, capsys, current, latest):\n \"Set up the run manager and detect if the upgrade message is printed.\"\n api = internal.Api(\n load_settings=False,\n retry_timedelta=datetime.timedelta(0, 0, 
50))\n api.set_current_run_id(123)\n run = Run()\n run_manager = wandb.run_manager.RunManager(run)\n\n # Without this mocking, during other tests, the _check_update_available\n # function will throw a \"mock not found\" error, then silently fail without\n # output (just like it would in a normal network failure).\n response = b'{ \"info\": { \"version\": \"%s\" } }' % bytearray(latest, 'utf-8')\n request_mocker.register_uri('GET', 'https://pypi.org/pypi/wandb/json',\n content=response, status_code=200)\n run_manager._check_update_available(current)\n\n captured_out, captured_err = capsys.readouterr()\n print(captured_out, captured_err)\n return \"To upgrade, please run:\" in captured_err\n\n\ndef test_throttle_file_poller(mocker, run_manager):\n emitter = run_manager.emitter\n assert emitter.timeout == 1\n for i in range(100):\n with open(os.path.join(wandb.run.dir, \"file_%i.txt\" % i), \"w\") as f:\n f.write(str(i))\n run_manager.test_shutdown()\n assert emitter.timeout == 2\n\n\ndef test_pip_freeze(mocker, run_manager):\n run_manager._block_file_observer()\n run_manager.init_run()\n # TODO(adrian): I've seen issues with this test when the W&B version\n # installed for the current python differs from the one (eg. from git)\n # that is running this test. Easy fix is to do \"pip install -e .\"\n reqs = open(os.path.join(wandb.run.dir, \"requirements.txt\")).read()\n print([r for r in reqs.split(\"\\n\") if \"wandb\" in r])\n wbv = \"wandb==%s\" % wandb.__version__\n assert wbv in reqs\n\n\ndef test_spell_sync(mocker, loggedin, run_manager, mock_server, local_netrc):\n run_manager._block_file_observer()\n run_manager.init_run(env={\"SPELL_RUN_URL\": \"https://spell.run/test\"})\n assert mock_server.requests['wandb_url'][0][\"url\"]\n\n\ndef test_custom_file_policy(mocker, run_manager):\n run_manager._block_file_observer()\n run_manager.init_run()\n for i in range(5):\n with open(os.path.join(wandb.run.dir, \"ckpt_%i.txt\" % i), \"w\") as f:\n f.write(str(i))\n wandb.save(\"ckpt*\")\n with open(os.path.join(wandb.run.dir, \"foo.bar\"), \"w\") as f:\n f.write(\"bar\")\n\n run_manager.test_shutdown()\n assert isinstance(\n run_manager._file_event_handlers[\"ckpt_0.txt\"], FileEventHandlerThrottledOverwriteMinWait)\n assert isinstance(\n run_manager._file_event_handlers[\"foo.bar\"], FileEventHandlerOverwriteDeferred)\n assert isinstance(\n run_manager._file_event_handlers[\"wandb-metadata.json\"], FileEventHandlerOverwriteOnce)\n assert isinstance(\n run_manager._file_event_handlers[\"requirements.txt\"], FileEventHandlerOverwrite)\n\n\ndef test_custom_file_policy_symlink(mocker, run_manager):\n mod = mocker.MagicMock()\n mocker.patch(\n 'wandb.run_manager.FileEventHandlerThrottledOverwriteMinWait.on_modified', mod)\n with open(\"ckpt_0.txt\", \"w\") as f:\n f.write(\"joy\")\n with open(\"ckpt_1.txt\", \"w\") as f:\n f.write(\"joy\" * 100)\n wandb.save(\"ckpt_0.txt\")\n with open(\"ckpt_0.txt\", \"w\") as f:\n f.write(\"joy\" * 100)\n wandb.save(\"ckpt_1.txt\")\n run_manager.test_shutdown()\n assert isinstance(\n run_manager._file_event_handlers[\"ckpt_0.txt\"], FileEventHandlerThrottledOverwriteMinWait)\n assert mod.called\n\n\ndef test_file_pusher_doesnt_archive_if_few(mocker, run_manager, mock_server):\n \"Test that only 3 files are uploaded individually.\"\n\n # Mock to increase minimum since some extra files are included with all\n # uploads, increasing the number past the default minimum of 6\n from wandb.file_pusher import FilePusher\n mocker.patch.object(FilePusher, 
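test_custom_file_policy asserts which handler class each file ends up with: globs registered through wandb.save() get the throttled-overwrite handler, two well-known files get fixed handlers, and everything else defers its upload to the end of the run. A toy dispatch that reproduces exactly the mapping the test asserts (the string names stand in for the handler classes; this is not wandb's internal code):

```python
import fnmatch

SPECIAL_FILES = {
    'wandb-metadata.json': 'FileEventHandlerOverwriteOnce',
    'requirements.txt': 'FileEventHandlerOverwrite',
}

def handler_for(filename, user_globs):
    if filename in SPECIAL_FILES:
        return SPECIAL_FILES[filename]
    if any(fnmatch.fnmatch(filename, g) for g in user_globs):
        return 'FileEventHandlerThrottledOverwriteMinWait'  # registered via wandb.save()
    return 'FileEventHandlerOverwriteDeferred'              # default: upload at run end

assert handler_for('ckpt_0.txt', ['ckpt*']) == 'FileEventHandlerThrottledOverwriteMinWait'
assert handler_for('foo.bar', ['ckpt*']) == 'FileEventHandlerOverwriteDeferred'
```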
'BATCH_THRESHOLD_SECS', 0.3)\n mocker.patch.object(FilePusher, 'BATCH_MIN_FILES', 10)\n\n for i in range(2):\n fname = \"ckpt_{}.txt\".format(i)\n with open(fname, \"w\") as f:\n f.write(\"w&b\" * 100)\n wandb.save(fname)\n run_manager.test_shutdown()\n\n filenames = [\n r['variables']['files'][0]\n for r in mock_server.requests['graphql']\n if 'files' in r['variables']]\n\n # assert there is no batching\n assert all('.tgz' not in filename for filename in filenames)\n\n\ndef test_file_pusher_archives_multiple(mocker, run_manager, mock_server):\n \"Test that 100 files are batched.\"\n for i in range(10):\n fname = \"ckpt_{}.txt\".format(i)\n with open(fname, \"w\") as f:\n f.write(\"w&b\" * 100)\n wandb.save(fname)\n run_manager.test_shutdown()\n\n req = [r for r in mock_server.requests['graphql']\n if 'files' in r['variables']][0]\n\n assert 'query Model' in req['query']\n assert req['variables']['name'] == 'testing'\n assert req['variables']['files'] == ['___batch_archive_1.tgz']\n\n\ndef test_remove_auto_resume(mocker, run_manager):\n resume_path = os.path.join(wandb.wandb_dir(), RESUME_FNAME)\n with open(resume_path, \"w\") as f:\n f.write(\"{}\")\n run_manager.test_shutdown()\n assert not os.path.exists(resume_path)\n\n\ndef test_sync_etc_multiple_messages(mocker, run_manager):\n mocked_policy = mocker.MagicMock()\n run_manager.update_user_file_policy = mocked_policy\n payload = json.dumps(\n {\"save_policy\": {\"glob\": \"*.foo\", \"policy\": \"end\"}}).encode(\"utf8\")\n wandb.run.socket.connection.sendall(payload + b\"\\0\" + payload + b\"\\0\")\n run_manager.test_shutdown()\n assert len(mocked_policy.mock_calls) == 2\n\n\ndef test_init_run_network_down(mocker, caplog):\n with CliRunner().isolated_filesystem():\n mocker.patch(\"wandb.apis.internal.Api.HTTP_TIMEOUT\", 0.5)\n api = internal.Api(\n load_settings=False,\n retry_timedelta=datetime.timedelta(0, 0, 50))\n api.set_current_run_id(123)\n run = Run()\n mocker.patch(\"wandb.run_manager.RunManager._upsert_run\",\n lambda *args: time.sleep(0.6))\n rm = wandb.run_manager.RunManager(run)\n step = rm.init_run()\n assert step == 0\n assert \"Failed to connect\" in caplog.text\n", "sub_path": "tests/test_run_manager.py", "file_name": "test_run_manager.py", "file_ext": "py", "file_size_in_byte": 7633, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "click.testing.CliRunner", "line_number": 34, "usage_type": "call"}, {"api_name": "wandb.apis.internal.Api", "line_number": 44, "usage_type": "call"}, {"api_name": "wandb.apis.internal", "line_number": 44, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 46, "usage_type": "call"}, {"api_name": "wandb.wandb_run.Run", "line_number": 48, "usage_type": "call"}, {"api_name": "wandb.run_manager.RunManager", "line_number": 49, "usage_type": "call"}, {"api_name": "wandb.run_manager", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "wandb.run", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "wandb.run", "line_number": 80, "usage_type": "attribute"}, {"api_name": "wandb.__version__", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, 
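The two file-pusher tests bracket the batching rule: two saved files are uploaded individually (no '.tgz' appears in any GraphQL request), while ten collapse into a single '___batch_archive_1.tgz'. A sketch of that decision under the thresholds the test patches in (the real defaults differ; the test's comment mentions a minimum of 6):

```python
import os
import tarfile

BATCH_MIN_FILES = 10  # the value the test patches in; assumed here for illustration

def maybe_batch(paths, archive='___batch_archive_1.tgz'):
    """Below the threshold, upload files one by one; at or above it, ship one .tgz."""
    if len(paths) < BATCH_MIN_FILES:
        return paths  # no batching: each file goes up on its own
    with tarfile.open(archive, 'w:gz') as tar:
        for p in paths:
            tar.add(p, arcname=os.path.basename(p))
    return [archive]
```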
{"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "wandb.run", "line_number": 96, "usage_type": "attribute"}, {"api_name": "wandb.save", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "wandb.run", "line_number": 99, "usage_type": "attribute"}, {"api_name": "wandb.run_manager.FileEventHandlerThrottledOverwriteMinWait", "line_number": 104, "usage_type": "argument"}, {"api_name": "wandb.run_manager.FileEventHandlerOverwriteDeferred", "line_number": 106, "usage_type": "argument"}, {"api_name": "wandb.run_manager.FileEventHandlerOverwriteOnce", "line_number": 108, "usage_type": "argument"}, {"api_name": "wandb.run_manager.FileEventHandlerOverwrite", "line_number": 110, "usage_type": "argument"}, {"api_name": "wandb.save", "line_number": 121, "usage_type": "call"}, {"api_name": "wandb.save", "line_number": 124, "usage_type": "call"}, {"api_name": "wandb.run_manager.FileEventHandlerThrottledOverwriteMinWait", "line_number": 127, "usage_type": "argument"}, {"api_name": "wandb.file_pusher.FilePusher", "line_number": 137, "usage_type": "argument"}, {"api_name": "wandb.file_pusher.FilePusher", "line_number": 138, "usage_type": "argument"}, {"api_name": "wandb.save", "line_number": 144, "usage_type": "call"}, {"api_name": "wandb.save", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 174, "usage_type": "call"}, {"api_name": "wandb.wandb_run.RESUME_FNAME", "line_number": 174, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "wandb.wandb_dir", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 184, "usage_type": "call"}, {"api_name": "wandb.run.socket.connection.sendall", "line_number": 186, "usage_type": "call"}, {"api_name": "wandb.run", "line_number": 186, "usage_type": "attribute"}, {"api_name": "click.testing.CliRunner", "line_number": 192, "usage_type": "call"}, {"api_name": "wandb.apis.internal.Api", "line_number": 194, "usage_type": "call"}, {"api_name": "wandb.apis.internal", "line_number": 194, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 196, "usage_type": "call"}, {"api_name": "wandb.wandb_run.Run", "line_number": 198, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 200, "usage_type": "call"}, {"api_name": "wandb.run_manager.RunManager", "line_number": 201, "usage_type": "call"}, {"api_name": "wandb.run_manager", "line_number": 201, "usage_type": "attribute"}]} +{"seq_id": "544567188", "text": "# -*- encoding: utf-8 -*-\nfrom django.shortcuts import get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom django.http import HttpResponse\nfrom .models import OrderedProduct\nfrom catalog.models import Product\n# Create your views here.\n\n\n@csrf_exempt\ndef add(request):\n if request.method == 'POST':\n data = request.POST\n\n product = get_object_or_404(Product, uuid=data['uuid'])\n\n del data['uuid']\n\n ordered_product = OrderedProduct.objects.create(\n *data,\n product=product\n )\n\n ordered_product.save()\n\n return HttpResponse(True)\n return HttpResponse('Внутренняя ошибка сервера')\n\n@csrf_exempt\ndef remove(request):\n if request.method == 'POST':\n data = 
request.POST\n\n ordered_product = get_object_or_404(OrderedProduct, uuid=data['uuid'])\n ordered_product.delete()\n\n return HttpResponse(True)\n return HttpResponse('Внутренняя ошибка сервера')\n\n", "sub_path": "ceiling/personal_data/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1024, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 16, "usage_type": "call"}, {"api_name": "catalog.models.Product", "line_number": 16, "usage_type": "argument"}, {"api_name": "models.OrderedProduct.objects.create", "line_number": 20, "usage_type": "call"}, {"api_name": "models.OrderedProduct.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.OrderedProduct", "line_number": 20, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 27, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 28, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 11, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 35, "usage_type": "call"}, {"api_name": "models.OrderedProduct", "line_number": 35, "usage_type": "argument"}, {"api_name": "django.http.HttpResponse", "line_number": 38, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 39, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "616411896", "text": "import utils as ut\nimport age \n\n############################################################################\n#\n#\tObjective: Collect name information from user while building profile \n#\t\n#\n#\tLinked from: messenger.py -- welcome message\n#\n#\tLinks to: age.py module\n#\n############################################################################\n\n\ndef process(message, user):\n\n\t#=====[ Get relevant info from utils ]=====\n\tuser_id, status, status_num, text = ut.get_info(user,message)\n\n\t#=====[ Status 1 asks for a name and status 2 asks for confirmation. 
Status 2 without \n\t#=====[ a yes means the user has not confirmed their name]=====\n\tif status_num == 1 or (status_num == 2 and 'yes' not in text):\n\t\t\n\t\t#=====[ If the user enters a name longer than three words, double check to make sure that's ]=====\n\t\t#=====[ what they want to be called ]=====\n\n\t\tname = ''\n\n\t\tif len(text.split()) > 1 or status_num == 2:\n\n\t\t\tword_indicators = ['name is ','call me ', 'is ','i am ',\"i'm \",'im ', ' me ']\n\n\t\t\tfor word in word_indicators:\n\t\t\t\tif word in text:\n\t\t\t\t\tname = text.split(word)[1].strip()\n\t\t\t\t\tname = name[0].upper() + name[1:]\n\t\t\t\t\tbreak\n\n\t\t\tif len(name) < 1:\n\n\t\t\t\t#=====[ If no name detected, and we're asking for confirmation, we ask for name again ]=====\n\t\t\t\tif status_num == 2:\n\n\t\t\t\t\tut.update_user(user, 'status', (\"name\", 1))\n\t\t\t\t\tut.send_response('So what should I call you?', user_id)\n\t\t\t\t\treturn\n\n\t\t\t\telse:\n\n\t\t\t\t\ttext = text[0].upper() + text[1:]\n\t\t\t\t\tuser['profile']['name'] = text\n\t\t\t\t\tut.update_user(user, 'profile', user['profile'])\n\t\t\t\t\tut.update_user(user, 'status', (\"name\", 2))\n\t\t\t\t\tut.send_response('Are you sure you want me to call you \"' + text + '\"?', user_id)\n\t\t\t\t\treturn\n\n\t\t#=====[ If name is one word, accept as name and move to next question ]=====\n\t\telse:\n\n\t\t\tname = text.strip()\n\t\t\tname = name[0].upper() + name[1:]\n\n\n\t\tuser['profile']['name'] = name\n\t\tut.update_user(user, 'profile', user['profile'])\n\t\tut.send_response('Sounds good, ' + user['profile']['name'], user_id)\n\n\t\tage.ask(user, user_id)\n\t\treturn\n\n\t#=====[ Status 2 confirms name ]=====\n\tif status_num == 2:\n\t\t\n\t\tif 'yes' in text:\n\t\t\t\n\t\t\tut.send_response('Sounds good, ' + user['profile']['name'], user_id)\n\n\t\t\tage.ask(user, user_id)\n\t\t\treturn\n\n", "sub_path": "modules/name.py", "file_name": "name.py", "file_ext": "py", "file_size_in_byte": 2230, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "utils.get_info", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.update_user", "line_number": 45, "usage_type": "call"}, {"api_name": "utils.send_response", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.update_user", "line_number": 53, "usage_type": "call"}, {"api_name": "utils.update_user", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.send_response", "line_number": 55, "usage_type": "call"}, {"api_name": "utils.update_user", "line_number": 66, "usage_type": "call"}, {"api_name": "utils.send_response", "line_number": 67, "usage_type": "call"}, {"api_name": "age.ask", "line_number": 69, "usage_type": "call"}, {"api_name": "utils.send_response", "line_number": 77, "usage_type": "call"}, {"api_name": "age.ask", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "613727419", "text": "import pygame, sys, random\nfrom player import Player\nfrom math import sqrt\nfrom bouncing_ball import Ball\nimport numpy as np \nimport time\n\nclass AvoidGame:\n\n\tdef __init__(self, \n\t\t\t\tplayer_size = 7, \n\t\t\t\tball_size_min = 6, ball_size_max = 12,\n\t\t\t\tball_speed_min = -1, ball_speed_max = 2,\n\t\t\t\tshow = True,\n\t\t\t\twidth = 1000, height = 700, \n\t\t\t\tnum_players = 1, num_balls = 5,\n\t\t\t\tedge = 15, speedup = 2, human_play = False, \n\t\t\t\tdifficulty_update_rate_sec = 10,\n\t\t\t\tclock_rate = 300, base_reward = 0.01):\n\n\t\t#Game Metadata 
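The indicator-word scan buried in process() is easier to see in isolation. A standalone rendering of the same logic (a hypothetical helper, not part of the module): list order matters, since 'name is ' must be tried before the bare 'is ', and only the first character is upper-cased, as the second assertion shows.

```python
def extract_name(text):
    indicators = ['name is ', 'call me ', 'is ', 'i am ', "i'm ", 'im ', ' me ']
    for word in indicators:
        if word in text:
            name = text.split(word)[1].strip()
            return name[0].upper() + name[1:] if name else ''
    return ''

assert extract_name('call me ishmael') == 'Ishmael'
assert extract_name('my name is ada lovelace') == 'Ada lovelace'
```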
Information\n\t\tself.show = show\n\t\tself.width = width\n\t\tself.height = height \n\t\tself.edge = edge \n\t\tself.speedup = speedup\n\t\tself.clock_rate = clock_rate\n\t\tself.player_size = player_size\n\t\tself.num_players = num_players\n\t\tself.human_play = human_play\n\t\tself.num_balls = num_balls\n\t\tself.ball_size_min = ball_size_min\n\t\tself.ball_size_max = ball_size_max\n\t\tself.ball_speed_min = ball_speed_min\n\t\tself.ball_speed_max = ball_speed_max\n\t\tself.difficulty_update_rate = difficulty_update_rate_sec\n\t\tself.base_reward = base_reward\n\t\tself.roster = []\n\t\tself.ball_list = []\n\t\tself.exit_message = {}\n\t\tself.active_game = None\n\t\tself.update_diff_clock = None\n\n\tdef make_ball(self):\n\t \"\"\"\n\t Function to make a new, random ball.\n\t \"\"\"\n\t self.ball_list.append(Ball(self))\n\n\n\tdef update_difficulty_monitor(self):\n\n\t\tif (time.time() - self.update_diff_clock) > self.difficulty_update_rate:\n\t\t\tself.update_difficulty()\n\t\t\tself.update_diff_clock = time.time()\n\n\tdef update_difficulty(self):\n\n\t\tfor i in range(len(self.ball_list) // 4):\n\t\t\tself.num_balls += 1\n\t\t\tself.make_ball()\n\n\tdef make_player(self):\n\t\tif self.human_play:\n\t\t\t\tself.roster.append(Player(self))\n\t\telse:\n\t\t\tself.roster.append(Player(self, \"ADD BRAIN\"))\n\n\tdef update_fitness(self, player):\n\t\tplayer.fitness += self.base_reward * self.num_balls\n\n\tdef update_player_velocity(self, event, player):\n\n\t\tif self.human_play:\n\n\t\t\tif event.type == pygame.KEYUP: \n\t\t\t\tif event.key in [pygame.K_LEFT, pygame.K_RIGHT]: \n\t\t\t\t player.change_x = 0\n\t\t\t\tif event.key in [pygame.K_UP, pygame.K_DOWN]: \n\t\t\t\t player.change_y = 0\n\n\t\t\telif event.type == pygame.KEYDOWN: \n\t\t\t if event.key == pygame.K_LEFT: # left arrow turns left\n\t\t\t player.change_x = -1\n\t\t\t if event.key == pygame.K_RIGHT: # right arrow turns right\n\t\t\t player.change_x = 1\n\t\t\t if event.key == pygame.K_UP: # up arrow goes up\n\t\t\t player.change_y = -1\n\t\t\t if event.key == pygame.K_DOWN: # down arrow goes down\n\t\t\t player.change_y = 1\n\t\t \n\n\tdef check_for_wall_collisions_human_play(self):\n\n\t\tself.exit_message['which_wall'] = \"\"\n\n\t\tplayer = self.roster[0]\n\n\t\tif player.x < self.edge + player.size:\n\t\t\tself.exit_message['which_wall'] = \"left\"\n\t\telif player.x > self.width - self.edge - player.size:\n\t\t\tself.exit_message['which_wall'] = \"right\"\n\t\telif player.y < self.edge + player.size:\n\t\t\tself.exit_message['which_wall'] = \"top\"\n\t\telif player.y > self.height - self.edge - player.size:\n\t\t\tself.exit_message['which_wall'] = \"bottom\"\n\n\t\tif self.exit_message['which_wall'] != \"\":\n\t\t\tself.exit_message['reason'] = \"wall\"\n\t\t\tself.active_game = False\n\t\t\tself.game_stop = time.time()\n\t\t\t#self.running = False\n\n\tdef check_for_ball_collisions_human_play(self):\n\n\t\tplayer = self.roster[0]\n\n\t\tfor ball in self.ball_list:\n\t\t\tif AvoidGame.distance(ball.x, ball.y, player.x, player.y) < (ball.size + player.size):\n\t\t\t\tself.exit_message['reason'] = 'ball'\n\t\t\t\tself.active_game = False\n\t\t\t\tself.game_stop = time.time()\n\t\t\t\t#self.running = False\n\t\t\t\treturn\n\n\tdef evaluate_state(self):\n\n\t\tif self.human_play:\n\t\t\tself.check_for_wall_collisions_human_play()\n\n\t\t\tif self.exit_message.get('reason', None) != \"wall\":\n\t\t\t\tself.check_for_ball_collisions_human_play()\n\n\t\tfor player in self.roster:\n\t\t\tself.network_input(player)\n\n\n\t 
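check_for_ball_collisions_human_play and find_valid_start_position both reduce to the same geometric test, which the class implements with its distance() static method: two circles overlap when the distance between their centres is below the sum of their radii. Isolated as a hypothetical helper (math.hypot is equivalent to the sqrt-of-squares form the game uses):

```python
from math import hypot

def circles_collide(x1, y1, r1, x2, y2, r2):
    """True when two circles overlap: centre distance below the sum of radii."""
    return hypot(x1 - x2, y1 - y2) < (r1 + r2)

# The game ends when a predator ball overlaps the player:
assert circles_collide(0, 0, 7, 10, 0, 6)       # 10 < 7 + 6
assert not circles_collide(0, 0, 7, 20, 0, 6)   # 20 > 7 + 6
```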
\t\n\tdef game_running_activities(self):\n\n\t #Quit event etc\n\t for event in pygame.event.get():\n\t if event.type == pygame.QUIT:\n\t pygame.quit()\n\t sys.exit()\n\t running= False\n\n\t for player in self.roster:\n\t \tself.update_player_location(event, player)\n\n\t \n\tdef on_init(self):\n\t\tself.game_start = time.time()\n\t\tself.update_diff_clock = time.time()\n\t\tself.active_game = True\n\n\t\tfor i in range(self.num_balls):\n\t\t\tself.make_ball()\n\n\t\t#Create players\n\t\tfor i in range(self.num_players):\n\t\t\tself.make_player()\n\n\n\t\t#Graphics information\n\t\tif self.show:\n\n\t\t\t#Start up pygame\n\t\t\tpygame.init()\n\t\t\tpygame.display.set_caption(\"NEAT Avoidance Game\")\n\n\t\t\t#Kick off font\n\t\t\tpygame.font.init()\n\t\t\tself.font = pygame.font.SysFont('Arial', 15)\n\n\t\t\t#Screen and clock\n\t\t\tself.clock = pygame.time.Clock()\n\t\t\tself.screen = pygame.display.set_mode((self.width,\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.height))\n\n\t\t\tself.running = True\n\n\tdef on_event(self, event):\n\t\tif event.type == pygame.QUIT:\n\t\t self.running = False\n\n\t\tif not self.active_game \\\n\t\t\tand event.type == pygame.KEYDOWN \\\n\t\t\tand event.key == pygame.K_SPACE:\n\n\t\t\tself = AvoidGame(human_play = True)\n\t\t\tself.on_execute()\n\n\t\tfor player in self.roster:\n\t\t\tself.update_player_velocity(event,player)\n\n\tdef find_valid_start_position(self, obj):\n\n\t\twhile True:\n\t\t\tbad = False\n\t\t\tobj.x = random.randrange(self.edge + obj.size, self.width - self.edge - obj.size)\n\t\t\tobj.y = random.randrange(self.edge + obj.size, self.height - self.edge - obj.size)\n\n\t\t\tfor ball in self.ball_list:\n\t\t\t if AvoidGame.distance(ball.x, ball.y, obj.x, obj.y) < (ball.size + obj.size):\n\t\t\t bad = True\n\n\t\t\tfor player in self.roster:\n\t\t\t\tif AvoidGame.distance(player.x, player.y, obj.x, obj.y) < (ball.size + obj.size):\n\t\t\t\t\tbad = True\n\n\t\t\tif not bad:\n\t\t\t return\n\n\tdef network_input(self, player):\n\n\t\tnew_ball_list = []\n\t\t\n\t\tfor ball in self.ball_list:\n\t\t\tplaced = False\n\t\t\tball.distance = AvoidGame.distance(ball.x, ball.y, player.x, player.y)\n\t\t\tif int(ball.distance) == 0:\n\t\t\t\tprint(\"\\n\\n\\n\\nITS WORKING\\n\\n\\n\\n\")\n\t\t\tfor i in range(len(new_ball_list)):\n\t\t\t\tif ball.distance < new_ball_list[i].distance:\n\t\t\t\t\tnew_ball_list.insert(i, ball)\n\t\t\t\t\tplaced = True\n\t\t\t\t\tbreak\n\t\t\tif not placed:\n\t\t\t\tnew_ball_list.append(ball)\n\n\t\tif len(new_ball_list) > len(self.ball_list):\n\t\t\tprint(len(new_ball_list))\n\t\t\tprint(len(self.ball_list))\n\t\t\traise ValueError(\"Something is wrong\")\n\n\t\tstate_input = [player.x, player.y, player.change_x, player.change_y, player.size]\n\n\t\ttop_5_closest_balls = new_ball_list[:5]\n\n\t\tfor ball in top_5_closest_balls:\n\n\t\t\tstate_input.append(ball.x)\n\t\t\tstate_input.append(ball.y)\n\t\t\tstate_input.append(ball.change_x)\n\t\t\tstate_input.append(ball.change_y)\n\t\t\tstate_input.append(ball.size)\n\n\t\tdist_l_wall = player.x - self.edge - self.player_size\n\t\tdist_r_wall = self.width - player.x - self.edge - self.player_size\n\t\tdist_t_wall = player.y - self.edge - self.player_size\n\t\tdist_b_wall = self.height - player.y - self.edge - self.player_size\n\n\t\tstate_input.append(dist_l_wall)\n\t\tstate_input.append(dist_r_wall)\n\t\tstate_input.append(dist_t_wall)\n\t\tstate_input.append(dist_b_wall)\n\n\t\tprint(state_input)\n\t\treturn state_input\n\n\n\tdef on_loop(self):\n\t\tfor ball in 
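One genuine bug is visible here: game_running_activities dispatches to self.update_player_location, a method the class never defines (the real handler is update_player_velocity), and its `running = False` sits after sys.exit(), so it can never execute. The method also duplicates the event pump that on_execute already runs through on_event, so it looks like dead code; if it were kept, a plausible repair would be the following sketch (method body shown standalone):

```python
import sys
import pygame

def game_running_activities(self):
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()  # nothing after this call runs, so no dangling flag assignment
        for player in self.roster:
            self.update_player_velocity(event, player)  # the handler the class defines
```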
self.ball_list:\n\t\t\tball.update_location()\n\n\t\tfor player in self.roster:\n\t\t\tplayer.update_location()\n\t\t\tself.update_fitness(player)\n\n\t\tself.update_difficulty_monitor()\n\n\tdef on_render(self):\n\n\t\tif self.active_game:\n\t\t\tplayer = self.roster[0] \n\t\t\tx_pos = self.font.render(\"\"\"X-Position: {:>5}\"\"\".format(player.x), False, (0, 0, 0))\n\t\t\ty_pos = self.font.render(\"\"\"Y-Position: {:>5}\"\"\".format(player.y), False, (0, 0, 0))\n\n\t\t\tleft_wall = self.font.render(\"\"\"Dist-L-Wall: {:>5}\"\"\"\\\n\t\t\t\t\t\t\t\t\t.format(round(np.log((player.x - self.edge - self.player_size)/10),3)), \n\t\t\t\t\t\t\t\t\t\t\tFalse, (0, 0, 0))\n\t\t\tright_wall = self.font.render(\"\"\"Dist-R-Wall: {:>5}\"\"\"\\\n\t\t\t\t\t\t\t\t\t.format(round(np.log((self.width - player.x - self.edge - self.player_size) / 10),3)), \n\t\t\t\t\t\t\t\t\t\t\tFalse, (0, 0, 0))\n\t\t\ttop_wall = self.font.render(\"\"\"Dist-T-Wall: {:>5}\"\"\"\\\n\t\t\t\t\t\t\t\t\t.format(round(np.log((player.y - self.edge - self.player_size)/10),3)), \n\t\t\t\t\t\t\t\t\t\t\tFalse, (0, 0, 0))\n\t\t\tbottom_wall = self.font.render(\"\"\"Dist-B-Wall: {:>5}\"\"\"\\\n\t\t\t\t\t\t\t\t\t.format(round(np.log((self.height - player.y - self.edge - self.player_size)/10),3)), \n\t\t\t\t\t\t\t\t\t\t\tFalse, (0, 0, 0))\n\n\t\t\tseconds_alive = self.font.render(\"\"\"Seconds Alive: {:>8}\"\"\"\\\n\t\t\t\t\t\t\t\t\t.format(int(time.time() - self.game_start)), \n\t\t\t\t\t\t\t\t\t\t\tFalse, (0, 0, 0))\n\n\t\t\tfitness = self.font.render(\"\"\"Fitness: {:>5}\"\"\"\\\n\t\t\t\t\t\t\t\t\t.format(round(player.fitness,3)), \n\t\t\t\t\t\t\t\t\t\t\tFalse, (0, 0, 0))\n\n\t\t\t#Game edges\n\t\t\th_edge = pygame.Surface((self.edge, self.height), 0)\n\t\t\th_edge.fill((0,0,0))\n\t\t\tv_edge = pygame.Surface((self.width, self.edge), 0)\n\t\t\tv_edge.fill((0,0,0))\t #print(rect.x, rect.y)\n\t\t\tself.screen.fill((255, 255, 255))\n\n\t\t\t#Draw players\n\t\t\tfor player in self.roster:\n\t\t\t\tpygame.draw.circle(self.screen, (0,0,255), [player.x, player.y], player.size)\n\n\t\t\tfor ball in self.ball_list:\n\t\t\t\tpygame.draw.circle(self.screen, (255,0,0), [ball.x, ball.y], ball.size)\n\n\t\t\tself.screen.blit(v_edge, (0, self.height-self.edge))\n\t\t\tself.screen.blit(h_edge, (self.width-self.edge, 0))\n\t\t\tself.screen.blit(v_edge, (0, 0))\n\t\t\tself.screen.blit(h_edge, (0, 0))\n\n\t\t\tself.screen.blit(x_pos,(self.width - self.edge - 100, self.edge + 10))\n\t\t\tself.screen.blit(y_pos,(self.width - self.edge - 100, self.edge + 30))\n\n\t\t\tself.screen.blit(left_wall,(self.edge + 5, self.edge + 10))\n\t\t\tself.screen.blit(right_wall,(self.edge + 5, self.edge + 30))\n\t\t\tself.screen.blit(top_wall,(self.edge + 5, self.edge + 50))\n\t\t\tself.screen.blit(bottom_wall,(self.edge + 5, self.edge + 70))\n\n\t\t\tself.screen.blit(seconds_alive,(self.edge + 5, self.edge + 110))\n\t\t\tself.screen.blit(fitness,(self.edge + 5, self.edge + 130))\n\n\t\telse:\n\t\t\texit_font_title = pygame.font.SysFont('freesansbold.ttf',200)\n\t\t\texit_font_reason = pygame.font.SysFont('freesansbold.ttf',80)\n\t\t\texit_font_score = pygame.font.SysFont('freesansbold.ttf',30)\n\t\t\texit_reason = None\n\t\t\texit_title = exit_font_title.render(\"\"\"Game Over\"\"\", False, (0, 0, 0))\n\t\t\telapsed_time_score = exit_font_score.render(\"\"\"Total Time Score: {:>5}\"\"\"\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t.format(int(self.game_stop - self.game_start)),\n\t\t\t\t\t\t\t\t\t\t\t\t\t False, (0, 0, 0))\n\n\t\t\tif self.exit_message['reason'] == 
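network_input (above) assembles the observation vector from the five nearest balls using a hand-rolled insertion sort into new_ball_list. A sorted() call with a distance key computes the same ranking in one expression (hypothetical helper, same geometry as the class's distance() method):

```python
from math import hypot

def nearest_balls(ball_list, px, py, n=5):
    # Equivalent to network_input's manual insertion sort: rank by distance, keep the closest n.
    return sorted(ball_list, key=lambda b: hypot(b.x - px, b.y - py))[:n]
```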
'wall':\n\t\t\t\texit_reason = exit_font_reason.render(\"\"\"You ran into the {} wall\"\"\"\\\n\t\t\t\t\t\t\t\t\t\t\t\t.format(self.exit_message['which_wall']),\n\t\t\t\t\t\t\t\t\t\t\t\t\tFalse, (0, 0, 0))\n\t\t\t\tself.screen.blit(exit_reason,(self.edge + 120, self.edge + 400))\n\t\t\t\tself.screen.blit(elapsed_time_score,(self.edge + 120, self.edge + 500))\n\n\t\t\telif self.exit_message['reason'] == 'ball':\n\t\t\t\texit_reason = exit_font_reason.render(\"\"\"You were hit by a predator ball\"\"\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tFalse, (0, 0, 0))\n\t\t\t\tself.screen.blit(exit_reason,(self.edge + 80, self.edge + 400))\n\t\t\t\tself.screen.blit(elapsed_time_score,(self.edge + 80, self.edge + 500))\n\t\t\t\n\t\t\tself.screen.blit(exit_title,(self.edge + 100, self.edge + 70))\n\t\t\t\n\n\t\t\t\n\n\t\tpygame.display.update()\n\t\tself.clock.tick(self.clock_rate)\n\n\tdef on_cleanup(self):\n\t pygame.quit()\n\n\tdef on_execute(self):\n\t\tself.on_init()\n\t\tself.update_difficulty_monitor()\n\t\twhile(self.running):\n\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tself.on_event(event)\n\t \n\t\t\tif self.active_game:\n\t\t\t\tself.on_loop()\n\n\t\t\t\tif self.show:\n\t\t\t\t\tself.on_render()\n\n\t\t\t\tself.evaluate_state()\n\n\t\t\telif self.show:\n\t\t\t\tself.on_render()\n\n\t\tself.on_cleanup()\n\n\t@staticmethod\n\tdef distance(x1,y1, x2, y2):\n\t\treturn sqrt((x1 - x2)**2 + (y1 - y2)**2)\n \nif __name__ == \"__main__\" :\n Game = AvoidGame(human_play = True, clock_rate = 300, speedup = 2)\n Game.on_execute()", "sub_path": "avoid_balls.py", "file_name": "avoid_balls.py", "file_ext": "py", "file_size_in_byte": 11534, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "bouncing_ball.Ball", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "time.time", "line_number": 55, "usage_type": "call"}, {"api_name": "player.Player", "line_number": 65, "usage_type": "call"}, {"api_name": "player.Player", "line_number": 67, "usage_type": "call"}, {"api_name": "player.fitness", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 77, "usage_type": "attribute"}, {"api_name": "player.change_x", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 79, "usage_type": "attribute"}, {"api_name": "player.change_y", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 83, "usage_type": "attribute"}, {"api_name": "player.change_x", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 85, "usage_type": "attribute"}, {"api_name": "player.change_x", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 87, "usage_type": "attribute"}, {"api_name": "player.change_y", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 89, "usage_type": "attribute"}, {"api_name": "player.change_y", "line_number": 90, "usage_type": "attribute"}, {"api_name": "player.x", "line_number": 99, "usage_type": "attribute"}, {"api_name": "player.size", "line_number": 99, 
"usage_type": "attribute"}, {"api_name": "player.x", "line_number": 101, "usage_type": "attribute"}, {"api_name": "player.size", "line_number": 101, "usage_type": "attribute"}, {"api_name": "player.y", "line_number": 103, "usage_type": "attribute"}, {"api_name": "player.size", "line_number": 103, "usage_type": "attribute"}, {"api_name": "player.y", "line_number": 105, "usage_type": "attribute"}, {"api_name": "player.size", "line_number": 105, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 111, "usage_type": "call"}, {"api_name": "player.x", "line_number": 119, "usage_type": "attribute"}, {"api_name": "player.y", "line_number": 119, "usage_type": "attribute"}, {"api_name": "player.size", "line_number": 119, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 142, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 143, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 144, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 145, "usage_type": "call"}, {"api_name": "time.time", "line_number": 153, "usage_type": "call"}, {"api_name": "time.time", "line_number": 154, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 169, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 170, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pygame.font.init", "line_number": 173, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 174, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 177, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 177, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 178, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 178, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 184, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 188, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 189, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 201, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 202, "usage_type": "call"}, {"api_name": "player.x", "line_number": 209, "usage_type": "attribute"}, {"api_name": "player.y", "line_number": 209, "usage_type": "attribute"}, {"api_name": "player.x", "line_number": 221, "usage_type": "attribute"}, {"api_name": "player.y", "line_number": 221, "usage_type": "attribute"}, {"api_name": "player.x", "line_number": 237, "usage_type": "attribute"}, {"api_name": "player.y", "line_number": 237, "usage_type": "attribute"}, {"api_name": "player.change_x", "line_number": 237, "usage_type": "attribute"}, {"api_name": "player.change_y", "line_number": 237, "usage_type": "attribute"}, {"api_name": "player.size", "line_number": 237, "usage_type": "attribute"}, {"api_name": "player.x", "line_number": 249, "usage_type": "attribute"}, {"api_name": "player.x", "line_number": 250, "usage_type": "attribute"}, {"api_name": "player.y", "line_number": 251, "usage_type": "attribute"}, {"api_name": "player.y", "line_number": 252, "usage_type": "attribute"}, 
{"api_name": "player.update_location", "line_number": 268, "usage_type": "call"}, {"api_name": "player.x", "line_number": 277, "usage_type": "attribute"}, {"api_name": "player.y", "line_number": 278, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 281, "usage_type": "call"}, {"api_name": "player.x", "line_number": 281, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 284, "usage_type": "call"}, {"api_name": "player.x", "line_number": 284, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 287, "usage_type": "call"}, {"api_name": "player.y", "line_number": 287, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 290, "usage_type": "call"}, {"api_name": "player.y", "line_number": 290, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 294, "usage_type": "call"}, {"api_name": "player.fitness", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 302, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 304, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 310, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 310, "usage_type": "attribute"}, {"api_name": "player.x", "line_number": 310, "usage_type": "attribute"}, {"api_name": "player.y", "line_number": 310, "usage_type": "attribute"}, {"api_name": "player.size", "line_number": 310, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 313, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 313, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 332, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 332, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 333, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 333, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 334, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 334, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 359, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 359, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 363, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 370, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 370, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 388, "usage_type": "call"}]} +{"seq_id": "42141962", "text": "from fastapi import FastAPI, File, UploadFile\nfrom fastapi.responses import HTMLResponse\nfrom mangum import Mangum\nfrom pydantic import BaseModel\nimport boto3\nimport io\nimport os\napp = FastAPI()\n\nclass ImageParam(BaseModel):\n image: str\n\n@app.post(\"/files/\")\ndef create_file(file: UploadFile = File(...)):\n service_name = \"sample-fastapi\"\n bucket_name = service_name + \"-resources-sls-imgageup-uploadimages\"\n target_name = os.path.basename(file.filename)\n s3 = boto3.resource('s3')\n contents = file.file.read()\n s3.Bucket(bucket_name).upload_fileobj(io.BytesIO(contents), target_name)\n bucket_location = boto3.client('s3').get_bucket_location(Bucket=bucket_name)\n\n url = f\"https://s3-{bucket_location['LocationConstraint']}.amazonaws.com/{bucket_name}/{target_name}\"\n return {\"url\": url}\n\n@app.get(\"/form\")\nasync def main():\n content = \"\"\"\n \n \n FastAPI Form Test\n \n \n

<form action=\"/files/\" enctype=\"multipart/form-data\" method=\"post\">\n <input name=\"file\" type=\"file\">\n <input type=\"submit\" value=\"Upload\">\n </form>
\n \n \"\"\"\n return HTMLResponse(content=content)\n\nhandler = Mangum(app)", "sub_path": "apps/app/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1226, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "fastapi.FastAPI", "line_number": 8, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 10, "usage_type": "name"}, {"api_name": "fastapi.UploadFile", "line_number": 14, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "boto3.resource", "line_number": 18, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 20, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 21, "usage_type": "call"}, {"api_name": "fastapi.responses.HTMLResponse", "line_number": 41, "usage_type": "call"}, {"api_name": "mangum.Mangum", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "579709657", "text": "from django.db import models\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom picklefield.fields import PickledObjectField\nfrom django.contrib.auth.models import User as AuthUser\n\nimport basic_common\nfrom beta_invite.models import EmailType, Campaign\nfrom business.models import BusinessUser\nfrom dashboard.models import Candidate\n\n\nclass CandidateEmailSent(models.Model):\n\n email_type = models.ForeignKey(EmailType, on_delete=models.DO_NOTHING)\n an_object = models.ForeignKey(Candidate, on_delete=models.DO_NOTHING, null=True)\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return 'id={0}, candidate_id={1}, email_type={2}'.format(self.pk, self.an_object.id, self.email_type)\n\n # adds custom table name\n class Meta:\n db_table = 'candidate_emails_sent'\n\n\nclass BusinessUserEmailSent(models.Model):\n\n email_type = models.ForeignKey(EmailType, on_delete=models.DO_NOTHING)\n an_object = models.ForeignKey(BusinessUser, on_delete=models.DO_NOTHING, null=True)\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return 'id={0}, candidate_id={1}, email_type={2}'.format(self.pk, self.an_object.id, self.email_type)\n\n # adds custom table name\n class Meta:\n db_table = 'business_user_emails_sent'\n\n\nclass CampaignEmailSent(models.Model):\n\n email_type = models.ForeignKey(EmailType, on_delete=models.DO_NOTHING)\n an_object = models.ForeignKey(Campaign, on_delete=models.DO_NOTHING, null=True)\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return 'id={0}, candidate_id={1}, email_type={2}'.format(self.pk, self.an_object.id, self.email_type)\n\n # adds custom table name\n class Meta:\n db_table = 'campaign_emails_sent'\n\n\n# -------------------------------------------------------------------------------------------------------------------- #\n\n\nclass CandidatePendingEmail(models.Model):\n\n the_objects = models.ManyToManyField(Candidate)\n language_code = models.CharField(max_length=3)\n body_input = models.CharField(max_length=10000)\n subject = models.CharField(max_length=200)\n email_type = models.ForeignKey(EmailType, on_delete=models.DO_NOTHING)\n\n # optional\n with_localization = 
models.BooleanField(default=True)\n body_is_filename = models.BooleanField(default=True)\n override_dict = PickledObjectField(default={})\n\n # internal\n sent = models.BooleanField(default=False)\n processed = models.BooleanField(default=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return 'id={0}'.format(self.pk)\n\n @staticmethod\n def add_to_queue(**kwargs):\n\n candidates = kwargs.pop('the_objects', None)\n\n email = CandidatePendingEmail(**kwargs)\n email.save()\n email.save_candidates(candidates)\n\n def save_candidates(self, candidates):\n \"\"\"\n Can __init__ with 1 candidate or a list of candidates.\n :param candidates:\n :return:\n \"\"\"\n if candidates and type(candidates) != list:\n candidates = [candidates]\n\n self.the_objects.set(candidates)\n self.save()\n\n # adds custom table name\n class Meta:\n db_table = 'candidate_pending_emails'\n\n\nclass BusinessUserPendingEmail(models.Model):\n\n the_objects = models.ManyToManyField(BusinessUser)\n language_code = models.CharField(max_length=3)\n body_input = models.CharField(max_length=10000)\n subject = models.CharField(max_length=200)\n email_type = models.ForeignKey(EmailType, on_delete=models.DO_NOTHING, null=True)\n\n # optional\n with_localization = models.BooleanField(default=True)\n body_is_filename = models.BooleanField(default=True)\n override_dict = PickledObjectField(default={})\n\n # internal\n sent = models.BooleanField(default=False)\n processed = models.BooleanField(default=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return 'id={0}'.format(self.pk)\n\n @staticmethod\n def add_to_queue(**kwargs):\n\n business_users = kwargs.pop('the_objects', None)\n\n email = BusinessUserPendingEmail(**kwargs)\n email.save()\n email.save_business_users(business_users)\n\n def save_business_users(self, business_users):\n \"\"\"\n Can __init__ with 1 business_user or a list of business_users.\n :param business_users:\n :return:\n \"\"\"\n if business_users and type(business_users) != list:\n business_users = [business_users]\n\n self.the_objects.set(business_users)\n self.save()\n\n # adds custom table name\n class Meta:\n db_table = 'business_user_pending_emails'\n\n\nclass CampaignPendingEmail(models.Model):\n\n the_objects = models.ManyToManyField(Campaign)\n language_code = models.CharField(max_length=3)\n body_input = models.CharField(max_length=10000)\n subject = models.CharField(max_length=200)\n email_type = models.ForeignKey(EmailType, on_delete=models.DO_NOTHING, null=True)\n\n # optional\n with_localization = models.BooleanField(default=True)\n body_is_filename = models.BooleanField(default=True)\n override_dict = PickledObjectField(default={})\n\n # internal\n sent = models.BooleanField(default=False)\n processed = models.BooleanField(default=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return 'id={0}'.format(self.pk)\n\n @staticmethod\n def add_to_queue(**kwargs):\n\n campaigns = kwargs.pop('the_objects', None)\n\n email = CampaignPendingEmail(**kwargs)\n email.save()\n email.save_campaigns(campaigns)\n\n def save_campaigns(self, campaigns):\n \"\"\"\n Can __init__ with 1 campaign or a list of campaigns.\n :param campaigns:\n :return:\n \"\"\"\n if campaigns and type(campaigns) != list:\n campaigns = [campaigns]\n\n 
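All three PendingEmail variants share the same add_to_queue shape: pop the related objects out of kwargs, save the row, then attach the many-to-many set. The ordering is not stylistic; Django cannot write the join rows until the parent has a primary key. A usage sketch (the field values and the `some_email_type`/`candidates` variables are hypothetical):

```python
# some_email_type is an existing EmailType row; candidates is a list of Candidate rows.
email = CandidatePendingEmail(
    language_code='es',
    body_input='welcome_email.html',   # body_is_filename defaults to True
    subject='Bienvenido',
    email_type=some_email_type,
)
email.save()                           # assigns the pk
email.the_objects.set(candidates)      # join rows can now reference it
```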
self.the_objects.set(campaigns)\n self.save()\n\n # adds custom table name\n class Meta:\n db_table = 'campaign_pending_emails'\n\n\nclass ActionType(models.Model):\n\n code = models.CharField(max_length=3, primary_key=True)\n name = models.CharField(max_length=200, default='')\n description = models.CharField(max_length=1000, default='')\n\n def __str__(self):\n return 'id={0}, code={1}, name={2}, description={3}'.format(self.pk,\n self.code,\n self.name,\n self.description)\n\n # adds custom table name\n class Meta:\n db_table = 'action_types'\n\n\nclass Action(models.Model):\n\n code = models.CharField(max_length=10, default='')\n name = models.CharField(max_length=200, default='')\n description = models.CharField(max_length=1000, default='')\n type = models.ForeignKey(ActionType, default=None, null=True, on_delete=models.SET_NULL)\n\n def __str__(self):\n return 'id={0}, code={1}, name={2}, description={3}'.format(self.pk,\n self.code,\n self.name,\n self.description)\n\n # adds custom table name\n class Meta:\n db_table = 'actions'\n\n def get_url(self):\n return basic_common.get_host() + '/dashboard/stats/' + str(self.name).replace(' ', '-')\n\n\nclass ActionLog(models.Model):\n\n action = models.ForeignKey(Action, on_delete=models.CASCADE)\n candidate = models.ForeignKey(Candidate, null=True, on_delete=models.SET_NULL)\n campaign = models.ForeignKey(Campaign, null=True, on_delete=models.SET_NULL)\n auth_user = models.ForeignKey(AuthUser, null=True, on_delete=models.SET_NULL)\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return 'id={0}, action={1}, candidate_id={2}, created_at={3}'.format(self.pk,\n self.action,\n self.candidate.pk,\n self.created_at)\n\n @staticmethod\n def create(code, candidate=None, campaign=None, request=None):\n\n if campaign is None and isinstance(candidate, Candidate):\n campaign = candidate.campaign\n\n if request is not None and isinstance(request.user, AuthUser):\n auth_user = request.user\n else:\n auth_user = None\n\n ActionLog(action=Action.objects.get(code=code),\n candidate=candidate,\n campaign=campaign,\n auth_user=auth_user).save()\n\n # adds custom table name\n class Meta:\n db_table = 'action_logs'\n\n\nclass PaymentLog(models.Model):\n\n campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE)\n candidate_num = models.IntegerField(default=0)\n is_unlimited = models.BooleanField(default=False)\n tax = models.FloatField()\n total = models.FloatField()\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return 'id={0}, campaign_id={1}, created_at={2}'.format(self.pk,\n self.campaign.pk,\n self.created_at)\n\n # adds custom table name\n class Meta:\n db_table = 'payment_logs'\n\n\nclass CandidatePopUp(models.Model):\n\n candidate = models.ForeignKey(Candidate, null=True, on_delete=models.SET_NULL)\n message = models.CharField(max_length=500)\n\n # internal\n sent = models.BooleanField(default=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return 'id={0}'.format(self.pk)\n\n @classmethod\n def get_last_popup(cls, candidate):\n\n popup = CandidatePopUp.objects.filter(sent=False, candidate=candidate).last()\n if popup:\n popup.sent = True\n popup.save()\n return popup\n\n class Meta:\n db_table = 'candidate_popups'\n", "sub_path": "testing_webpage/testing_webpage/models.py", "file_name": 
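CandidatePopUp.get_last_popup is a claim-once read: it returns the newest unsent popup for a candidate and flags it sent=True in the same call, so polling twice never delivers the same message, and it falls through to None when the queue is empty. A hypothetical session (`candidate` is a saved Candidate row):

```python
CandidatePopUp.objects.create(candidate=candidate, message='Your interview is confirmed')

first = CandidatePopUp.get_last_popup(candidate)   # returns the popup, marks it sent
second = CandidatePopUp.get_last_popup(candidate)  # nothing left to deliver

assert first.message == 'Your interview is confirmed'
assert second is None
```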
"models.py", "file_ext": "py", "file_size_in_byte": 10788, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.db.models.Model", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 14, "usage_type": "call"}, {"api_name": "beta_invite.models.EmailType", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 15, "usage_type": "call"}, {"api_name": "dashboard.models.Candidate", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 30, "usage_type": "call"}, {"api_name": "beta_invite.models.EmailType", "line_number": 30, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 31, "usage_type": "call"}, {"api_name": "business.models.BusinessUser", "line_number": 31, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 46, "usage_type": "call"}, {"api_name": "beta_invite.models.EmailType", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 47, "usage_type": "call"}, {"api_name": "beta_invite.models.Campaign", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": 
"django.db.models.DateTimeField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 65, "usage_type": "call"}, {"api_name": "dashboard.models.Candidate", "line_number": 65, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 69, "usage_type": "call"}, {"api_name": "beta_invite.models.EmailType", "line_number": 69, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 69, "usage_type": "attribute"}, {"api_name": "django.db.models.BooleanField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 73, "usage_type": "name"}, {"api_name": "picklefield.fields.PickledObjectField", "line_number": 74, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 77, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 77, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 80, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 80, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 111, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 111, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 113, "usage_type": "call"}, {"api_name": "business.models.BusinessUser", "line_number": 113, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 113, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 114, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 114, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 115, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 115, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 116, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 116, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 117, "usage_type": "call"}, {"api_name": "beta_invite.models.EmailType", "line_number": 117, "usage_type": "argument"}, 
{"api_name": "django.db.models", "line_number": 117, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 117, "usage_type": "attribute"}, {"api_name": "django.db.models.BooleanField", "line_number": 120, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 120, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 121, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 121, "usage_type": "name"}, {"api_name": "picklefield.fields.PickledObjectField", "line_number": 122, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 125, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 125, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 126, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 126, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 127, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 127, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 128, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 128, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 159, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 159, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 161, "usage_type": "call"}, {"api_name": "beta_invite.models.Campaign", "line_number": 161, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 161, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 162, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 162, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 163, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 163, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 164, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 164, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 165, "usage_type": "call"}, {"api_name": "beta_invite.models.EmailType", "line_number": 165, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 165, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 165, "usage_type": "attribute"}, {"api_name": "django.db.models.BooleanField", "line_number": 168, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 168, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 169, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 169, "usage_type": "name"}, {"api_name": "picklefield.fields.PickledObjectField", "line_number": 170, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 173, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 173, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 174, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 174, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 175, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 175, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 176, "usage_type": "call"}, {"api_name": 
"django.db.models", "line_number": 176, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 207, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 207, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 209, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 209, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 210, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 210, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 211, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 211, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 224, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 224, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 226, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 226, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 227, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 227, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 228, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 228, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 229, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 229, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 229, "usage_type": "attribute"}, {"api_name": "basic_common.get_host", "line_number": 242, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 245, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 245, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 247, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 247, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 247, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 248, "usage_type": "call"}, {"api_name": "dashboard.models.Candidate", "line_number": 248, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 248, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 248, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 249, "usage_type": "call"}, {"api_name": "beta_invite.models.Campaign", "line_number": 249, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 249, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 249, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 250, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 250, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 250, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 250, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 252, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 252, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 253, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 253, "usage_type": "name"}, {"api_name": "dashboard.models.Candidate", "line_number": 264, "usage_type": 
"argument"}, {"api_name": "django.contrib.auth.models.User", "line_number": 267, "usage_type": "argument"}, {"api_name": "django.db.models.Model", "line_number": 282, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 282, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 284, "usage_type": "call"}, {"api_name": "beta_invite.models.Campaign", "line_number": 284, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 284, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 284, "usage_type": "attribute"}, {"api_name": "django.db.models.IntegerField", "line_number": 285, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 285, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 286, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 286, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 287, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 287, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 288, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 288, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 290, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 290, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 291, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 291, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 303, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 303, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 305, "usage_type": "call"}, {"api_name": "dashboard.models.Candidate", "line_number": 305, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 305, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 305, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 306, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 306, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 309, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 309, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 310, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 310, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 311, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 311, "usage_type": "name"}]} +{"seq_id": "95432705", "text": "from ..model import getModel\nfrom ..data import Cifar10Data\nfrom tqdm import trange\nimport torch\nfrom pathlib import Path\nfrom functools import partial\n\ndef check_for_dir(*args_path):\n for path in args_path:\n if not path.exists():\n path.mkdir(parents=True)\n\ndef delete_file(path):\n if path.exists() and not path.is_dir():\n path.unlink()\n\ndef logger(filepath , *args,**kwargs):\n print(*args,**kwargs)\n with open(filepath,\"a\") as f: # appends to file and closes it when finished\n print(file=f,*args,**kwargs)\n\ndef train_model(model,\n data_loader ,\n optimizer=None,\n criterion =None,\n num_epochs=5 ,\n save_model_filename=\"saved_weights.pt\",\n log_filename=\"training_logs.txt\"):\n if optimizer is None:\n optimizer = 
torch.optim.Adam(model.parameters(), lr=1e-3)\n    if criterion is None:\n        criterion = torch.nn.CrossEntropyLoss()\n    global logger\n    log_filename_path = Path(\"./src/training_logs/\")\n    save_model_filename_path = Path(\"./src/saved_weights/\")\n    check_for_dir(log_filename_path,save_model_filename_path)\n    save_model_filename_path = save_model_filename_path/save_model_filename\n    log_filename_path = log_filename_path/log_filename\n    delete_file(log_filename_path)\n    logger = partial(logger,log_filename_path) \n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    # bug fix: move the model onto the chosen device so it matches the batches below\n    model = model.to(device)\n    best_val_loss = float(\"inf\")\n    for epoch in trange(num_epochs,desc=\"Epochs\"):\n        result = [f\"[ Epochs {epoch} | {num_epochs} ] : \"]\n        for phase in ['train', 'val']:\n            if phase==\"train\": # put the model in training mode\n                model.train()\n            else: # put the model in validation mode\n                model.eval()\n\n            # keep track of training and validation loss\n            running_loss = 0.0\n            running_corrects = 0.0\n            for data , target in data_loader[phase]:\n                #load the data and target to respective device\n                data , target = data.to(device) , target.to(device)\n\n                with torch.set_grad_enabled(phase==\"train\"):\n                    #feed the input\n                    output = model(data)\n                    #calculate the loss\n                    loss = criterion(output,target)\n                    preds = torch.argmax(output,1)\n\n                    if phase==\"train\" :\n                        # backward pass: compute gradient of the loss with respect to model parameters \n                        loss.backward()\n                        # update the model parameters\n                        optimizer.step()\n                        # zero the grad to stop it from accumulating\n                        optimizer.zero_grad()\n\n                # statistics\n                running_loss += loss.item() * data.size(0)\n                running_corrects += torch.sum(preds == target.data).item()\n\n            epoch_loss = running_loss / len(data_loader[phase].dataset)\n            epoch_acc = running_corrects / len(data_loader[phase].dataset)\n            if phase ==\"val\":\n                if epoch_loss < best_val_loss:\n                    logger(f\"Saving the current best model. 
Previous best loss = {best_val_loss} Current best loss = {epoch_loss}\")\n best_val_loss = epoch_loss\n torch.save(model.module.state_dict(),save_model_filename_path)\n result.append('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\n logger(\" \".join(result))\n\n\n", "sub_path": "src/training/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3528, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "torch.optim.Adam", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 30, "usage_type": "attribute"}, {"api_name": "model.parameters", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 34, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 35, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tqdm.trange", "line_number": 43, "usage_type": "call"}, {"api_name": "model.train", "line_number": 47, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 49, "usage_type": "call"}, {"api_name": "data.to", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.set_grad_enabled", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 63, "usage_type": "call"}, {"api_name": "data.size", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 83, "usage_type": "call"}, {"api_name": "model.module.state_dict", "line_number": 83, "usage_type": "call"}, {"api_name": "model.module", "line_number": 83, "usage_type": "attribute"}]} +{"seq_id": "549415995", "text": "import pandas as pd\nimport os.path\n\ncurdir = r\"C:\\Users\\Asish\\Documents\\Courses\\Python Based Data Analytics\\Final Project\"\nprint(curdir)\n\n#for j in range(0,10):\n# year = ['2006','2007','2008','2009','2010','2011','2012','2013','2014','2015']\n# dHome = \"/Users/HimanshuBharara/Documents/CU-Sem2/IEORE4571/Projects/Lyrics/\"+year[j]+\"/\"\n# print(dHome)\n\ndHome = os.path.join(curdir,\"Lyrics/2006/\")\nprint(dHome)\nlyrics_array = []\nprint(lyrics_array)\nfor i in range(0,100):\n try:\n name=dHome+str(i)+\".\"+\"txt\"\n file=open(name,'r+')\n lyrics_array.append(file.read())\n #globals()['string%s' % i] = file.read()\n file.close()\n except ValueError:\n print(i)\n\ndHome = os.path.join(curdir, \"Lyrics/2006/\")\nname= os.path.join(curdir, \"Lyrics/2006/74.txt\")\nfile=open(name,'r+')\n\nfrom collections import Counter\n\nfor doc in lyrics_array:\n tf = Counter()\n for word in doc.split():\n tf[word] +=1\n\nimport string #allows for format()\n \ndef build_lexicon(corpus):\n lexicon = set()\n for doc in corpus:\n lexicon.update([word for word in doc.split()])\n return lexicon\n\ndef tf(term, document):\n return freq(term, document)\n\ndef freq(term, document):\n return document.split().count(term)\n\nvocabulary = build_lexicon(lyrics_array)\n\ndoc_term_matrix = []\n#print ('Our vocabulary vector is [' + ', '.join(list(vocabulary)) + ']')\nfor doc in 
lyrics_array:\n #print ('The doc is \"' + doc + '\"')\n tf_vector = [tf(word, doc) for word in vocabulary]\n tf_vector_string = ', '.join(format(freq, 'd') for freq in tf_vector)\n #print ('The tf vector for Document %d is [%s]' % ((lyrics_array.index(doc)+1), tf_vector_string))\n doc_term_matrix.append(tf_vector)\n \n # here's a test: why did I wrap mydoclist.index(doc)+1 in parens? it returns an int...\n # try it! type(mydoclist.index(doc) + 1)\n\nprint (\"All combined, here is our master document term matrix:\")\n#print (doc_term_matrix)\n\nimport math\nimport numpy as np\n\ndef l2_normalizer(vec):\n denom = np.sum([el**2 for el in vec])\n if denom == 0:\n return [(0) for el in vec]\n else:\n return [(el / math.sqrt(denom)) for el in vec]\n \ndoc_term_matrix_l2 = []\nfor vec in doc_term_matrix:\n doc_term_matrix_l2.append(l2_normalizer(vec))\n\nprint ('A regular old document term matrix: ')\nprint (np.matrix(doc_term_matrix))\nprint ('\\nA document term matrix with row-wise L2 norms of 1:')\nprint (np.matrix(doc_term_matrix_l2))\n\ndef numDocsContaining(word, doclist):\n doccount = 0\n for doc in doclist:\n if freq(word, doc) > 0:\n doccount +=1\n return doccount \n\ndef idf(word, doclist):\n n_samples = len(doclist)\n df = numDocsContaining(word, doclist)\n return np.log(n_samples / 1+df)\n\nmy_idf_vector = [idf(word, lyrics_array) for word in vocabulary]\n\nprint(len(vocabulary))\n\nimport numpy as np\n\ndef build_idf_matrix(idf_vector):\n idf_mat = np.zeros((len(idf_vector), len(idf_vector)))\n np.fill_diagonal(idf_mat, idf_vector)\n return idf_mat\n\nmy_idf_matrix = build_idf_matrix(my_idf_vector)\n\ndoc_term_matrix_tfidf = []\ndHome = curdir\n#performing tf-idf matrix multiplication\nfor tf_vector in doc_term_matrix:\n doc_term_matrix_tfidf.append(np.dot(tf_vector, my_idf_matrix))\n\n#normalizing\ndoc_term_matrix_tfidf_l2 = []\nfor tf_vector in doc_term_matrix_tfidf:\n doc_term_matrix_tfidf_l2.append(l2_normalizer(tf_vector))\n \n#print vocabulary\ni=2006\nprint(np.matrix(doc_term_matrix_tfidf_l2)) # np.matrix() just to make it easier to look at\ndic_t = dHome+str(i)+\"_\"+\"dict\"+\".txt\"\nvec=dHome+str(i)+\"_\"+\"vector\"+\".\"+\"txt\"\n\nfrom nltk.corpus import wordnet\n\nlist1 = vocabulary\nlist2 = ['anger', 'surprise', 'joy', 'sadness', 'love', 'fear']\n\nlist3 = []\n\nfor i,word1 in enumerate(list1):\n k = []\n for word2 in list2:\n wordFromList1 = wordnet.synsets(word1)\n wordFromList2 = wordnet.synsets(word2)\n if wordFromList1 and wordFromList2: #Thanks to @alexis' note\n s = wordFromList1[0].wup_similarity(wordFromList2[0])\n k.append(s)\n if k == [] or k == [None, None, None, None, None, None]:\n k = [0.,0.,0.,0.,0.,0.]\n list3.append(k)\n #print(word1, k)\n #if i == 10:\n # break\nb = np.array(list3)\nprint(b.shape)\n", "sub_path": "nocleaning.py", "file_name": "nocleaning.py", "file_ext": "py", "file_size_in_byte": 4319, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 26, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.path", 
"line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 27, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 72, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 125, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet.synsets", "line_number": 139, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 139, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.synsets", "line_number": 140, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet", "line_number": 140, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "36141557", "text": "from doppelkopf.toggles import Toggle\nfrom doppelkopf.db import db\nfrom flask import json\n\n\ndef test_index(client):\n response = client.get(\"/api/\")\n assert response.status_code == 200\n assert b\"Healthy\" in response.data\n\n\ndef test_should_create_game(client):\n response = client.post(\"/api/game\")\n data = response.get_json()\n\n assert response.status_code == 201\n assert data[\"game\"][\"id\"] is not None\n assert data[\"game\"][\"players\"] == []\n\n\ndef test_should_get_game(client):\n game_id = start_game(client)\n\n response = client.get(f\"/api/game/{game_id}\")\n data = response.get_json()\n\n assert response.status_code == 200\n assert data[\"game\"][\"id\"] == game_id\n assert data[\"game\"][\"players\"] == []\n\n\ndef test_should_404_when_getting_unknown_game(client):\n game_id = 9999\n\n response = client.get(f\"/api/game/{game_id}\")\n data = response.get_json()\n\n assert response.status_code == 404\n\n\ndef test_should_add_cors_header_when_creating_game(client):\n response = client.post(\"/api/game\")\n assert response.headers[\"Access-Control-Allow-Origin\"] == \"*\"\n\n\ndef test_should_join_game(client):\n game_id = start_game(client)\n\n payload = {\"player\": {\"name\": \"April\"}}\n response = client.post(f\"/api/game/{game_id}/join\", json=payload)\n data = response.get_json()\n\n assert response.status_code == 200\n assert data[\"game\"][\"id\"] == game_id\n assert data[\"game\"][\"players\"] == [{\"name\": \"April\"}]\n\n\ndef test_should_add_cors_header_when_joining_game(client):\n game_id = start_game(client)\n\n payload = {\"player\": {\"name\": \"April\"}}\n response = client.post(f\"/api/game/{game_id}/join\", json=payload)\n\n assert response.headers[\"Access-Control-Allow-Origin\"] == \"*\"\n\n\ndef test_should_return_bad_request_when_joining_game_without_data(client):\n game_id = start_game(client)\n\n response = client.post(f\"/api/game/{game_id}/join\")\n\n assert response.status_code == 400\n\n\ndef test_should_return_not_found_when_joining_unknown_game(client):\n game_id = 42\n\n payload = {\"player\": {\"name\": \"April\"}}\n response = client.post(f\"/api/game/{game_id}/join\", json=payload)\n\n assert response.status_code == 404\n\n\ndef test_should_return_toggles(client):\n 
save_toggle(\"some-toggle\", enabled=True)\n save_toggle(\"another-toggle\", enabled=False)\n\n response = client.get(\"/api/features\")\n data = response.get_json()\n\n expected_data = \"\"\"\n {\n \"features\": {\n \"some-toggle\": true,\n \"another-toggle\": false\n }\n }\"\"\"\n\n assert response.status_code == 200\n assert data == json.loads(expected_data)\n\n\ndef save_toggle(name=\"some-toggle\", enabled=True) -> Toggle:\n toggle = Toggle(name=name, enabled=enabled)\n db.session.add(toggle)\n db.session.commit()\n return toggle\n\n\ndef start_game(client) -> int:\n response = client.post(\"/api/game\")\n return response.get_json()[\"game\"][\"id\"]\n", "sub_path": "backend/test/test_api.py", "file_name": "test_api.py", "file_ext": "py", "file_size_in_byte": 2916, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.json.loads", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 100, "usage_type": "name"}, {"api_name": "doppelkopf.toggles.Toggle", "line_number": 104, "usage_type": "call"}, {"api_name": "doppelkopf.db.db.session.add", "line_number": 105, "usage_type": "call"}, {"api_name": "doppelkopf.db.db.session", "line_number": 105, "usage_type": "attribute"}, {"api_name": "doppelkopf.db.db", "line_number": 105, "usage_type": "name"}, {"api_name": "doppelkopf.db.db.session.commit", "line_number": 106, "usage_type": "call"}, {"api_name": "doppelkopf.db.db.session", "line_number": 106, "usage_type": "attribute"}, {"api_name": "doppelkopf.db.db", "line_number": 106, "usage_type": "name"}, {"api_name": "doppelkopf.toggles.Toggle", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "450994579", "text": "\"\"\"def reading_sequence():\r\n input = open(\"input.txt\", \"r\")\r\n readable_seq = open(\"readable_seq.txt\", \"w\")\r\n input_lines = input.readlines()\r\n input_lines.append(\"stop\")\r\n a = 0\r\n while input_lines[a] != \"stop\":\r\n sequence = \"\"\r\n if input_lines[a][0] == \">\":\r\n readable_seq.write(input_lines[a].replace(\"\\r\",\"\").replace(\"\\n\",\"\") + \"\\n\")\r\n a += 1\r\n else:\r\n while input_lines[a][0] != \">\":\r\n sequence += str(input_lines[a]).replace(\"\\r\",\"\").replace(\"\\n\",\"\")\r\n a+=1\r\n if input_lines[a] == \"stop\" or input_lines[a][0] == \">\":\r\n break\r\n readable_seq.write(sequence + \"\\n\")\r\n if input_lines[a] == \"stop\":\r\n break\r\n readable_seq.close()\r\nreading_sequence()\r\ninput = open(\"readable_seq.txt\", \"r\")\r\ninput_lines = input.readlines()\r\n\r\nprofile = {\r\n \"A\":[],\r\n \"C\":[],\r\n \"G\":[],\r\n \"T\":[]\r\n}\r\n\r\nn = 0\r\nconsensus = \"\"\r\nwhile n != len(input_lines[1].replace(\"\\n\",\"\")):\r\n a = 0\r\n c = 0\r\n g = 0\r\n t = 0\r\n for line in input_lines[1::2]:\r\n if line[n] == \"A\":\r\n a += 1\r\n elif line[n] == \"C\":\r\n c += 1\r\n elif line[n] == \"G\":\r\n g += 1\r\n elif line[n] == \"T\":\r\n t += 1\r\n profile[\"A\"] += str(a)\r\n profile[\"C\"] += str(c)\r\n profile[\"G\"] += str(g)\r\n profile[\"T\"] += str(t)\r\n if a == max([a, c, g, t]):\r\n consensus += \"A\"\r\n elif c == max([a, c, g, t]):\r\n consensus += \"C\"\r\n elif g == max([a, c, g, t]):\r\n consensus += \"G\"\r\n elif t == max([a, c, g, t]):\r\n consensus += \"T\"\r\n n += 1\r\nprint(consensus)\r\nfor key in profile:\r\n print(key + \": \" + str(profile[key]).replace(\"[\",\"\").replace(\"]\",\"\").replace(\",\",\"\").replace(\"'\",\"\"))\"\"\"\r\n\r\n\r\n## Pseudo code\r\n\r\n#Uses the \"list of lists\" 
approach to creating a matrix and assumes we're given three strings.\r\n\r\n\"\"\"class MatrixObject:\r\n self.matrix = [ [length of DNA string], [length of DNA string], [length of DNA string] ]\r\n self.profile = [ [A-count goes here], [G-count goes here], [C-count goes here], [T-count goes here] ]\r\n #Each of the lists inside of self.profile is the same length as the DNA string.\r\n def add_string_to_matrix(dna_string): \r\n self.matrix.append(list(dna_string))\r\n def create_profile():\r\n pos = 0\r\n for strand in self.matrix:\r\n for nuc in strand:\r\n if nuc == 'A':\r\n self.profile[0][pos] += 1\r\n if nuc == 'G':\r\n self.profile[1][pos] += 1\r\n if nuc == 'C':\r\n self.profile[2][pos] += 1\r\n if nuc == 'T':\r\n self.profile[3][pos] += 1\r\n pos += 1\r\n def create_con_string():\r\n con_string = \"\"\r\n for pos in range(length of DNA string):\r\n arg_max(self.profile[0][pos], self.profile[1][pos], self.profile[2][pos], self.profile[3][pos])\r\n if arg_max == 0:\r\n con_string += 'A'\r\n if arg_max == 1:\r\n con_string += 'G'\r\n if arg_max == 2:\r\n con_string += 'C'\r\n if arg_max == 3:\r\n con_string += 'T'\r\n return con_string\"\"\"\r\n\r\n# with biopython\r\n\r\nfrom Bio import SeqIO\r\nfrom collections import Counter\r\ns=[]\r\nhandle = open('rosalind_cons.txt', \"r\")\r\nfor record in SeqIO.parse(handle, \"fasta\"):\r\n s.append(str(record.seq))\r\nhandle.close()\r\n\r\na=[Counter([seq[x] for seq in s]) for x in range(len(s[0]))]\r\nprint(''.join([a[x].most_common(1)[0][0] for x in range(len(a))]))\r\nprint('A: ' + ' '.join([str(a[x]['A']) for x in range(len(a))]))\r\nprint('C: ' + ' '.join([str(a[x]['C']) for x in range(len(a))]))\r\nprint('G: ' + ' '.join([str(a[x]['G']) for x in range(len(a))]))\r\nprint('T: ' + ' '.join([str(a[x]['T']) for x in range(len(a))]))", "sub_path": "Rosalind #10 - Consensus and Profile.py", "file_name": "Rosalind #10 - Consensus and Profile.py", "file_ext": "py", "file_size_in_byte": 3756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "Bio.SeqIO.parse", "line_number": 110, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 110, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "628524112", "text": "import asyncio\nimport random\n\nimport wrapt\n\nimport rubrix\nfrom rubrix.monitoring.helpers import start_loop_in_thread\n\n_LOGGING_LOOP = None\n\n\ndef _get_current_loop():\n global _LOGGING_LOOP\n if not _LOGGING_LOOP:\n _LOGGING_LOOP = start_loop_in_thread()\n return _LOGGING_LOOP\n\n\nclass ModelNotSupportedError(Exception):\n pass\n\n\nclass BaseMonitor(wrapt.ObjectProxy):\n \"\"\"\n A base monitor class for easy task model monitoring\n\n Attributes:\n -----------\n\n dataset:\n Rubrix dataset name\n\n sample_rate:\n The portion of the data to store in Rubrix. Default = 0.2\n\n \"\"\"\n\n def __init__(self, *args, dataset: str, sample_rate: float, **kwargs):\n super().__init__(*args, **kwargs)\n\n assert dataset, \"Missing dataset\"\n assert (\n 0.0 < sample_rate <= 1.0\n ), \"Wrong sample rate. 
Set a value in (0, 1] range.\"\n\n self.dataset = dataset\n self.sample_rate = sample_rate\n\n @property\n def __model__(self):\n \"\"\"Return the monitored task model\"\"\"\n return self.__wrapped__\n\n def is_record_accepted(self) -> bool:\n \"\"\"Return True if a record should be logged to rubrix\"\"\"\n return random.uniform(0.0, 1.0) <= self.sample_rate\n\n def _log2rubrix(self, *args, **kwargs):\n raise NotImplementedError()\n\n def log_async(self, *args, **kwargs):\n wrapped_func = self._log2rubrix\n loop = _get_current_loop()\n\n async def f():\n return wrapped_func(*args, **kwargs)\n\n asyncio.run_coroutine_threadsafe(f(), loop)\n", "sub_path": "src/rubrix/monitoring/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 1571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "rubrix.monitoring.helpers.start_loop_in_thread", "line_number": 15, "usage_type": "call"}, {"api_name": "wrapt.ObjectProxy", "line_number": 23, "usage_type": "attribute"}, {"api_name": "random.uniform", "line_number": 56, "usage_type": "call"}, {"api_name": "asyncio.run_coroutine_threadsafe", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "625878599", "text": "# IMPORTATIONS\n\nprint(\"Importing librairies ...\")\n\nimport tensorflow \nimport os\nimport numpy as np\nimport keras\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.preprocessing import image\nimport pandas as pd\nfrom keras.callbacks import EarlyStopping\n\n# MODEL INITIALIZATION\n\nprint(\"Initializing model ...\")\n\nmodel = Sequential()\n\nmodel.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(256, 256, 3)))\n\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(4, 4)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(4, 4)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(4, 4)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense((33)))\nmodel.add(Softmax())\n\nmodel.compile(loss = keras.losses.categorical_crossentropy, optimizer='adam', metrics=['categorical_accuracy'])\n\nmodel.summary()\n\n# TO GRAYSCALE IMAGES\n\ndef to_grayscale(image):\n image = tensorflow.image.rgb_to_grayscale(image)\n return image\n\n# TRAIN GENERATORS\n\nprint(\"Retrieving data and generating trainers ...\")\n\ndata_generator = image.ImageDataGenerator(rescale=1./255, preprocessing_function=to_grayscale)\n\n# RETRIEVING IMAGE NAMES AND ASSOCIATED LABELS\ntrain_df = pd.read_csv(\"/path_to_your_df/final_training_dataset_oversampled.csv\")\ntest_df = pd.read_csv(\"/path_to_your_df/final_test_dataset_oversampled.csv\")\n\ntrain_generator = data_generator.flow_from_dataframe(directory='/path_to_your_images', target_size=(256, 256), shuffle=False, batch_size = 32, class_mode = 'categorical', dataframe = train_df, x_col = 'Filenames', y_col = 'labels', validate_filenames=False)\n\ntest_generator = data_generator.flow_from_dataframe(directory='/path_to_your_images', target_size=(256, 256), shuffle=False, batch_size = 32, class_mode = 'categorical', dataframe = test_df, x_col = 'Filenames', y_col = 'labels', validate_filenames=False)\n\n# FITTING MODEL\n\nprint(\"Fitting model ...\")\n\ne_s = EarlyStopping(monitor='val_loss', patience = 1)\nhistory = model.fit(train_generator, steps_per_epoch=len(train_generator), epochs=15, callbacks=[e_s], 
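\n                    # NOTE (editor comment): to_grayscale() above maps each image through\n                    # tensorflow.image.rgb_to_grayscale, which yields a single channel, while\n                    # the first Conv2D was declared with input_shape=(256, 256, 3); the gray\n                    # values appear to be broadcast back over three identical channels when\n                    # the generator fills each batch.\n                    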
validation_data=test_generator)\n\nmodel.save('/path_to_your_models_directory/model_multiclass_33classes_256_256.h5')\n\nprint(\"Training finished, model saved.\")\n", "sub_path": "code/cnn_model_multiclass_merged_classes.py", "file_name": "cnn_model_multiclass_merged_classes.py", "file_ext": "py", "file_size_in_byte": 2354, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "keras.losses", "line_number": 39, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.image", "line_number": 46, "usage_type": "name"}, {"api_name": "tensorflow.image.rgb_to_grayscale", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 46, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.image", "line_number": 47, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 53, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "652228481", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 9 12:21:51 2017\n\n@author: helio\n\"\"\"\n\nfrom flask import Flask, jsonify,request,abort\nfrom sqlalchemy import create_engine\nfrom collections import OrderedDict\nimport logging as logg\nfrom logging.handlers import RotatingFileHandler\nimport sys\n#==============================================================================\n# Defining logfile\n#==============================================================================\n\npth_log=sys.argv[1]\n\nfilesize=5 #in Mb\n\nlogg.basicConfig(level=logg.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%d-%m-%Y %H:%M',\n filename=pth_log,\n filemode='w')\nhandler = RotatingFileHandler(pth_log,maxBytes=filesize*1024*1024,backupCount=20,mode='a')\n\n\nlog = logg.getLogger()\n\nlog.addHandler(handler)\n\n#==============================================================================\n# END OF LOGFILE\n#==============================================================================\n\napp = Flask(__name__)\n\ne = create_engine('sqlite:///info_feiras.db')\n\n#==============================================================================\n# PRESENT ALL THE DB \n#==============================================================================\n@app.route('/api/v1.0/feiras', methods=['GET'])\ndef get_feiras():\n query = e.execute(\"select * from feiras;\")\n return jsonify([dict(zip(tuple (query.keys()) ,i)) for i in query.cursor])\n\n#==============================================================================\n# ADD NEW FEIRA WITH SOME MANDATORY ENTRIES\n#==============================================================================\n\n@app.route('/api/v1.0/feiras', methods=['POST'])\ndef add_feira():\n obrig=[\"LONG\",\"LAT\",\"SETCENS\",\"AREAP\",\"CODDIST\",\"DISTRITO\",\"CODSUBPREF\",\"SUBPREFE\",\"REGIAO5\",\"REGIAO8\",\"NOME_FEIRA\"\n ,\"REGISTRO\",\"LOGRADOURO\"]\n if not request.is_json:\n abort(400)\n if not set(obrig).issubset(set(request.json)):\n log.error(\"Feira failed to add, missed some entry parameter;\")\n abort(400)\n entry=[request.json.get(i) for i in obrig]\n [entry.append(request.json.get(i,\"\")) for i in 
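\n        # NOTE (editor comment): this list comprehension is used only for its append()\n        # side effects; a plain for-loop over the three optional columns would read better.\n        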
[\"NUMERO\",\"BAIRRO\",\"REFERENCIA\"]]\n string=(\"?,\"*len(entry))[:-1]\n entry=tuple(entry)\n exect = e.execute(\"insert into feiras values(null, %s);\"%string, entry)\n q=e.execute(\"select max(id) from feiras\")\n idd=q.fetchall()[0][0]\n log.info(\"Feira ID=%i succefully added;\"%idd)\n return jsonify({'Entry': True})\n\n#==============================================================================\n# REMOVE FEIRA USING ID\n#==============================================================================\n\n@app.route('/api/v1.0/feiras/', methods=['DELETE'])\ndef remove_feira(del_ID):\n query = e.execute(\"delete from feiras where id=%i\"%del_ID)\n return jsonify({'Deleted': True})\n\n\n#==============================================================================\n# UPDATE ALL INFORMATION OF FEIRA BASED ON ID, EXCEPT ID ITSELF\n#==============================================================================\n\n@app.route('/api/v1.0/feiras/', methods=['PUT'])\ndef update_feira(change_id):\n all_cols=[\"LONG\",\"LAT\",\"SETCENS\",\"AREAP\",\"CODDIST\",\"DISTRITO\",\"CODSUBPREF\",\"SUBPREFE\",\"REGIAO5\",\"REGIAO8\",\"NOME_FEIRA\"\n ,\"REGISTRO\",\"LOGRADOURO\",\"NUMERO\",\"BAIRRO\",\"REFERENCIA\"]\n changed=dict((i, False) for i in all_cols) # create the dict for the return\n if not request.is_json:\n log.error(\"Feira ID=%i failed to update, input not JSON;\"%change_id)\n abort(400)\n if \"ID\" in request.json.keys(): \n log.error(\"Feira ID=%i failed to update, ID can't be updated;\"%change_id)\n abort(400)\n entry=request.json.keys()\n tmp=dict((i, True) for i in entry)\n changed.update(tmp)\n str_entry=(',').join([\"%s=%s\"%(i,request.json.get(i)) for i in entry])\n e.execute(\"update feiras set %s where id=%i\"%(str_entry,change_id))\n log.info(\"Feira ID=%i succefully updated. New parameters: %s;\"%(change_id,str_entry)) \n return jsonify({\"ID\":change_id,\"UPDATED\":dict(changed)}) \n\n#==============================================================================\n# SEARCH FEIRAS BASED ON INFOS\n#==============================================================================\n@app.route('/api/v1.0/feiras/', methods=['GET'])\ndef get_some_feiras(query_string):\n parameters=[\"DISTRITO\",\"REGIAO5\",\"NOME_FEIRA\",\"BAIRRO\"]\n query_parameters=query_string.split(\"&\")\n string=[i.split(\"=\") for i in query_parameters]\n pars=[i[0].upper() for i in string]\n if not set(pars).issubset(set(parameters)):\n log.error(\"Search feiras failed for wrong parameter search. Parameters searched: %s;\"%((\", \").join(pars)))\n abort(400)\n str_entry=\"select * from feiras where \"\n for n,i in enumerate(string):\n if n==len(string)-1:\n str_entry+=i[0]+\"='%s';\"%i[1]\n else:\n str_entry+=i[0]+\"='%s' and \"%i[1]\n query=e.execute(str_entry)\n log.info(\"Search feiras succefull. 
Parameters searched: %s;\"%(query_string)) \n return jsonify([dict(zip(tuple (query.keys()) ,i)) for i in query.cursor])\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n \n", "sub_path": "api_fv.py", "file_name": "api_fv.py", "file_ext": "py", "file_size_in_byte": 5330, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.is_json", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request.is_json", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.json.keys", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 96, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 96, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.request.json.keys", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 99, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 102, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 102, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "381232976", "text": "from copy import copy\n\nfrom django.conf import settings\nfrom django.urls import reverse\n\n\ndef get_permissions(response_mapping, custom_mapping):\n \"\"\"\n Build permission mappings.\n\n :param response_mapping: 
usually a predefined permission template (FORBIDDEN, NOT_FOUND, etc.)\n    :type response_mapping: dict\n    :param custom_mapping: key/value pairs which need to be customised\n    :type custom_mapping: dict\n\n    :returns: a new response method and status code mapping\n    :rtype: dict\n    \"\"\"\n    response_mapping = copy(response_mapping)\n    response_mapping.update(custom_mapping)\n    return response_mapping\n\n\ndef assert_permissions(client_type, response_code_mapping, client_mapping, url_reverse):\n    \"\"\"\n    Test URL response depending on client type.\n\n    :param client_type: type of client (anonymous, user, admin, etc.)\n    :type client_type: string\n    :param response_code_mapping: request type with a matching response code\n    :type response_code_mapping: dict\n    :param client_mapping: a fixture that contains client types\n    :type client_mapping: dict\n    :param url_reverse: tuple of reverse strings for URLs which receive requests\n    :type url_reverse: tuple\n    \"\"\"\n    for method in response_code_mapping.keys():\n        for url in url_reverse:\n            response_code = getattr(\n                client_mapping[client_type], method\n            )(reverse(url), secure=not settings.DEBUG).status_code\n\n            assert response_code == response_code_mapping[method], print(\n                'client: {}, method: {}, received: {}, expected: {}'.format(\n                    client_type, method, response_code, response_code_mapping[method]\n                )\n            )\n", "sub_path": "tests/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1705, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "copy.copy", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 41, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "159853062", "text": "from sys import stdin\r\nfrom typing import List, Tuple\r\nfrom itertools import groupby\r\n\r\n\r\nSUIT = {\r\n    0: '\\033[34m♠\\033[0m',\r\n    1: '\\033[31m♥\\033[0m',\r\n    2: '\\033[33m♦\\033[0m',\r\n    3: '\\033[32m♣\\033[0m',\r\n    4: '\\033[30m🤡\\033[0m'\r\n}\r\n\r\nHAND_TYPE = [\r\n    'Straight Flush',\r\n    'Four of a Kind',\r\n    'Full House',\r\n    'Flush',\r\n    'Straight',\r\n    'Three of a Kind',\r\n    'Two Pair',\r\n    'One Pair',\r\n    'High Cards'\r\n]\r\n\r\n\r\nclass Card:\r\n    def __init__(self, suit: int, number: int):\r\n        self.suit = suit\r\n        self.number = number\r\n\r\n\r\ndef int_to_char(c: Card) -> str:\r\n    if c.suit == 4:\r\n        return ''\r\n    elif c.number == 1:\r\n        return 'A'\r\n    elif c.number == 11:\r\n        return 'J'\r\n    elif c.number == 12:\r\n        return 'Q'\r\n    elif c.number == 13:\r\n        return 'K'\r\n    else:\r\n        return str(c.number)\r\n\r\n\r\nclass Hand:\r\n    def __init__(self, cl: List[Card]):\r\n        self.cards = cl\r\n        sort_hand(self)\r\n\r\n\r\ndef sort_hand(h: Hand) -> None:\r\n    h.cards = sorted(h.cards, key=lambda c: (c.number, c.suit))\r\n\r\n\r\ndef hand_analysis(h: Hand) -> Tuple[bool, bool, Tuple[int, ...]]:\r\n    card_rank_duplicate = tuple(sorted(len(list(n)) for _, n in groupby([c.number for c in h.cards])))\r\n    # bug fix: test the hand's own cards rather than the module-level 'cards' list\r\n    is_flush = True if h.cards[0].suit == h.cards[1].suit == h.cards[2].suit == h.cards[3].suit == h.cards[4].suit else False\r\n    if h.cards[0].number == 1 and h.cards[1].number == 10 and h.cards[2].number == 11 and h.cards[3].number == 12 and h.cards[4].number == 13:\r\n        is_straight = True\r\n    else:\r\n        is_straight = True if h.cards[0].number == h.cards[1].number-1 == h.cards[2].number-2 == h.cards[3].number-3
== h.cards[4].number-4 else False\r\n return is_flush, is_straight, card_rank_duplicate\r\n\r\n\r\ndef hand_type_analysis(h: Hand) -> None:\r\n is_flush, is_straight, card_rank_duplicate = hand_analysis(h)\r\n if is_straight and is_flush:\r\n h.hand_type = 0\r\n elif card_rank_duplicate == (1, 4):\r\n h.hand_type = 1\r\n elif card_rank_duplicate == (2, 3):\r\n h.hand_type = 2\r\n elif is_flush:\r\n h.hand_type = 3\r\n elif is_straight:\r\n h.hand_type = 4\r\n elif card_rank_duplicate == (1, 1, 3):\r\n h.hand_type = 5\r\n elif card_rank_duplicate == (1, 2, 2):\r\n h.hand_type = 6\r\n elif card_rank_duplicate == (1, 1, 1, 2):\r\n h.hand_type = 7\r\n else:\r\n h.hand_type = 8\r\n\r\n\r\ndef print_cards(h: Hand) -> None:\r\n line = ''\r\n for i, c in enumerate(h.cards):\r\n mark = SUIT[c.suit]\r\n line += '[{0}{1:>2}]'.format(mark, int_to_char(c))\r\n print(line)\r\n\r\n\r\ndef print_hand_type(h: Hand) -> None:\r\n if hasattr(h, 'hand_type'):\r\n print(HAND_TYPE[h.hand_type])\r\n\r\n\r\nif __name__ == '__main__':\r\n cards = []\r\n for _ in range(5):\r\n tmp = [int(x) for x in stdin.readline().rstrip().split()]\r\n cards.append(Card(tmp[0], tmp[1]))\r\n hand = Hand(cards)\r\n print_cards(hand)\r\n hand_type_analysis(hand)\r\n print_hand_type(hand)\r\n", "sub_path": "poker/poker.py", "file_name": "poker.py", "file_ext": "py", "file_size_in_byte": 3004, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "typing.List", "line_number": 49, "usage_type": "name"}, {"api_name": "itertools.groupby", "line_number": 59, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 58, "usage_type": "name"}, {"api_name": "sys.stdin.readline", "line_number": 106, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 106, "usage_type": "name"}]} +{"seq_id": "634722038", "text": "import pygame\r\nfrom pygame.locals import *\r\nimport colors\r\nimport test\r\nfrom ship import *\r\nfrom asteroid import *\r\nfrom physics import *\r\nfrom win32api import GetSystemMetrics\r\n\r\n# Screen and Color Variables\r\nD_WIDTH = 800\r\nD_HEIGHT = 600\r\nFPS = 60\r\n\r\npygame.init()\r\ndisplay = pygame.display.set_mode((D_WIDTH, D_HEIGHT))\r\npygame.display.set_caption('Pysteroids')\r\nclock = pygame.time.Clock()\t\t\r\npygame.mouse.set_visible(True)\r\n\r\n# Entities initialization\r\nship = Ship(D_WIDTH / 2, D_HEIGHT - (D_HEIGHT / 4), display)\r\nag = AsteroidGenerator(display)\r\nag.generate(4)\r\ncd = CollisionDetector(ship, ag, ship.projectiles)\r\n\r\nplaying = True\r\nwhile playing :\r\n\r\n\tkeys = pygame.key.get_pressed()\r\n\r\n\tfor event in pygame.event.get() :\r\n\t\tif event.type == pygame.QUIT :\r\n\t\t\t playing = False\r\n\t\t\t\r\n\t\t# Event handling here--------------------------\r\n\t\t\r\n\t\t# -- Keydown events\r\n\t\tif event.type == pygame.KEYDOWN :\r\n\t\t\tif event.key == pygame.K_SPACE :\r\n\t\t\t\tship.shoot()\r\n\r\n\t\t\tif event.key == pygame.K_p :\r\n\t\t\t\tplaying = False\r\n\t\t# -- End Keydown events\r\n\t\t\r\n\t\t# End event handling -------------------------\r\n\t\t\r\n\t# Game Logic -------------------------------------\r\n\t\r\n\r\n\t# -- Elements updates\r\n\tif keys[pygame.K_a] :\r\n\t\tship.turn('left')\r\n\t\t\r\n\tif keys[pygame.K_d] :\r\n\t\tship.turn('right')\r\n\r\n\tif keys[pygame.K_w] :\r\n\t\tship.boost()\r\n\r\n\tcd.handle_projectile_hits_asteroid()\r\n\tship.update()\r\n\tag.update()\r\n\t# -- End Elements updates\r\n\t\r\n\r\n\t# -- Screen 
drawing\r\n\tship.draw()\r\n\tag.draw()\r\n\t# -- End Screen drawing\r\n\r\n\r\n\t# End Game Logic ---------------------------------\r\n\r\n\tpygame.display.update()\r\n\tclock.tick(FPS)\r\n\tdisplay.fill(colors.black)\r\n\t\r\npygame.quit()", "sub_path": "asteroids.py", "file_name": "asteroids.py", "file_ext": "py", "file_size_in_byte": 1646, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pygame.init", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.mouse.set_visible", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 19, "usage_type": "attribute"}, {"api_name": "ship.projectiles", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "ship.shoot", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.K_p", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 53, "usage_type": "attribute"}, {"api_name": "ship.turn", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.K_d", "line_number": 56, "usage_type": "attribute"}, {"api_name": "ship.turn", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.K_w", "line_number": 59, "usage_type": "attribute"}, {"api_name": "ship.boost", "line_number": 60, "usage_type": "call"}, {"api_name": "ship.update", "line_number": 63, "usage_type": "call"}, {"api_name": "ship.draw", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 76, "usage_type": "attribute"}, {"api_name": "colors.black", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "24345908", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[9]:\n\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport seaborn as sn \nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import MinMaxScaler\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[10]:\n\n\ndata = pd.read_csv(\"datahealth.csv\")\ndata\n\n\n# In[11]:\n\n\nfrom itertools import combinations as com\nimport sklearn.cluster as cluster\n\n\n# In[12]:\n\n\nlist_of_Tupel = data.columns\nlist_of_Tupel = list_of_Tupel.drop(\"stroke\")\nlist_of_Tupel = list_of_Tupel.drop(\"id\")\nprint(list_of_Tupel)\n\n\n# In[13]:\n\n\n#Mögliche Tupel auflisten\nanzahl = 0\nfor tupel in com(list_of_Tupel,2):\n anzahl+=1\n print(anzahl)\n 
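# Note: com(list_of_Tupel, 2) enumerates all C(10,2) = 45 unordered feature pairs,\n    # one for each of the tupel1..tupel45 frames defined further down.\n    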
print(tupel)\n\n\n# In[14]:\n\n\ndel data['stroke']\ndel data['id']\ndata\n\n\n# In[15]:\n\n\n# Normalize the attributes\nscaler = MinMaxScaler()\n\n#Gender\nscaler.fit(data[['gender']])\ndata['gender'] = scaler.transform(data[['gender']])\n\n#Age\nscaler.fit(data[['age']])\ndata['age'] = scaler.transform(data[['age']])\n\n#hypertension\n\nscaler.fit(data[['hypertension']])\ndata['hypertension'] = scaler.transform(data[['hypertension']])\n\n#heart_disease\n\nscaler.fit(data[['heart_disease']])\ndata['heart_disease'] = scaler.transform(data[['heart_disease']])\n\n# Ever married\nscaler.fit(data[['ever_married']])\ndata['ever_married'] = scaler.transform(data[['ever_married']])\n\n#work_type\nscaler.fit(data[['work_type']])\ndata['work_type'] = scaler.transform(data[['work_type']])\n\n#Residence_type\nscaler.fit(data[['Residence_type']])\ndata['Residence_type'] = scaler.transform(data[['Residence_type']])\n\n#avg_glucose_level\nscaler.fit(data[['avg_glucose_level']])\ndata['avg_glucose_level'] = scaler.transform(data[['avg_glucose_level']])\n\n#bmi\nscaler.fit(data[['bmi']])\ndata['bmi'] = scaler.transform(data[['bmi']])\n\n#smoking_status\nscaler.fit(data[['smoking_status']])\ndata['smoking_status'] = scaler.transform(data[['smoking_status']])\n\ndata\n\n\n# In[16]:\n\n\n# Normalize the data to 1\n#With a while loop\n#i = 0\n#while i < 10:\n    #tupel = list_of_Tupel[i]\n    #scaler = MinMaxScaler()\n    #scaler.fit(data[[tupel]])\n    #data[tupel] = scaler.transform(data[[tupel]])\n    #i += 1\n#data\n\n\n# In[17]:\n\n\n#define the tupel slices\ntupel1=data[['gender', 'age']]\n\ntupel2=data[['gender', 'hypertension']]\n\ntupel3=data[['gender', 'heart_disease']]\n\ntupel4=data[['gender', 'ever_married']]\n\ntupel5=data[['gender', 'work_type']]\n\ntupel6=data[['gender', 'Residence_type']]\n\ntupel7=data[['gender', 'avg_glucose_level']]\n\ntupel8=data[['gender', 'bmi']]\n\ntupel9=data[['gender', 'smoking_status']]\n\ntupel10=data[['age', 'hypertension']]\n\ntupel11=data[['age', 'heart_disease']]\n\ntupel12=data[['age', 'ever_married']]\n\ntupel13=data[['age', 'work_type']]\n\ntupel14=data[['age', 'Residence_type']]\n\ntupel15=data[['age', 'avg_glucose_level']]\n\ntupel16=data[['age', 'bmi']]\n\ntupel17=data[['age', 'smoking_status']]\n\ntupel18=data[['hypertension', 'heart_disease']]\n\ntupel19=data[['hypertension', 'ever_married']]\n\ntupel20=data[['hypertension', 'work_type']]\n\ntupel21=data[['hypertension', 'Residence_type']]\n\ntupel22=data[['hypertension', 'avg_glucose_level']]\n\ntupel23=data[['hypertension', 'bmi']]\n\ntupel24=data[['hypertension', 'smoking_status']]\n\ntupel25=data[['heart_disease', 'ever_married']]\n\ntupel26=data[['heart_disease', 'work_type']]\n\ntupel27=data[['heart_disease', 'Residence_type']]\n\ntupel28=data[['heart_disease', 'avg_glucose_level']]\n\ntupel29=data[['heart_disease', 'bmi']]\n\ntupel30=data[['heart_disease', 'smoking_status']]\n\ntupel31=data[['ever_married', 'work_type']]\n\ntupel32=data[['ever_married', 'Residence_type']]\n\ntupel33=data[['ever_married', 'avg_glucose_level']]\n\ntupel34=data[['ever_married', 'bmi']]\n\ntupel35=data[['ever_married', 'smoking_status']]\n\ntupel36=data[['work_type', 'Residence_type']]\n\ntupel37=data[['work_type', 'avg_glucose_level']]\n\ntupel38=data[['work_type', 'bmi']]\n\ntupel39=data[['work_type', 'smoking_status']]\n\ntupel40=data[['Residence_type', 'avg_glucose_level']]\n\ntupel41=data[['Residence_type', 'bmi']]\n\ntupel42=data[['Residence_type', 'smoking_status']]\n\ntupel43=data[['avg_glucose_level',
'bmi']]\n\ntupel44=data[['avg_glucose_level', 'smoking_status']]\n\ntupel45=data[['bmi', 'smoking_status']]\n\n\n# In[50]:\n\n\nT1 = range(1,10)\ninertia1 = []\nfor t1 in T1:\n kmeans = cluster.KMeans(n_clusters = t1, init = \"k-means++\")\n kmeans = kmeans.fit(tupel1)\n inertia_iter1 = kmeans.inertia_\n inertia1.append(inertia_iter1)\n\n\n# In[19]:\n\n\nT2 = range(1,10)\ninertia2 = []\nfor t2 in T2:\n kmeans = cluster.KMeans(n_clusters = t2, init = \"k-means++\")\n kmeans = kmeans.fit(tupel2)\n inertia_iter2 = kmeans.inertia_\n inertia2.append(inertia_iter2)\n\n\n# In[20]:\n\n\nT3 = range(1,10)\ninertia3 = []\nfor t3 in T3:\n kmeans = cluster.KMeans(n_clusters = t3, init = \"k-means++\")\n kmeans = kmeans.fit(tupel3)\n inertia_iter3 = kmeans.inertia_\n inertia3.append(inertia_iter3)\n\n\n# In[21]:\n\n\nT4 = range(1,10)\ninertia4 = []\nfor t4 in T4:\n kmeans = cluster.KMeans(n_clusters = t4, init = \"k-means++\")\n kmeans = kmeans.fit(tupel4)\n inertia_iter4 = kmeans.inertia_\n inertia4.append(inertia_iter4)\n\n\n# In[22]:\n\n\nT5 = range(1,10)\ninertia5 = []\nfor t5 in T5:\n kmeans = cluster.KMeans(n_clusters = t5, init = \"k-means++\")\n kmeans = kmeans.fit(tupel5)\n inertia_iter5 = kmeans.inertia_\n inertia5.append(inertia_iter5)\n\n\n# In[23]:\n\n\nT6 = range(1,10)\ninertia6 = []\nfor t6 in T6:\n kmeans = cluster.KMeans(n_clusters = t6, init = \"k-means++\")\n kmeans = kmeans.fit(tupel6)\n inertia_iter6 = kmeans.inertia_\n inertia6.append(inertia_iter6)\n\n\n# In[24]:\n\n\nT7 = range(1,10)\ninertia7 = []\nfor t7 in T7 :\n kmeans = cluster.KMeans(n_clusters = t7, init = \"k-means++\")\n kmeans = kmeans.fit(tupel7)\n inertia_iter7 = kmeans.inertia_\n inertia7.append(inertia_iter7)\n\n\n# In[25]:\n\n\nT8 = range(1,10)\ninertia8 = []\nfor t8 in T8:\n kmeans = cluster.KMeans(n_clusters = t8, init = \"k-means++\")\n kmeans = kmeans.fit(tupel8)\n inertia_iter8 = kmeans.inertia_\n inertia8.append(inertia_iter8)\n\n\n# In[26]:\n\n\nT9 = range(1,10)\ninertia9 = []\nfor t9 in T9:\n kmeans = cluster.KMeans(n_clusters = t9, init = \"k-means++\")\n kmeans = kmeans.fit(tupel9)\n inertia_iter9 = kmeans.inertia_\n inertia9.append(inertia_iter9)\n\n\n# In[27]:\n\n\nT10 = range(1,10)\ninertia10 = []\nfor t10 in T10:\n kmeans = cluster.KMeans(n_clusters = t10, init = \"k-means++\")\n kmeans = kmeans.fit(tupel10)\n inertia_iter10 = kmeans.inertia_\n inertia10.append(inertia_iter10)\n\n\n# In[28]:\n\n\nT11 = range(1,10)\ninertia11 = []\nfor t11 in T11:\n kmeans = cluster.KMeans(n_clusters = t11, init = \"k-means++\")\n kmeans = kmeans.fit(tupel11)\n inertia_iter11 = kmeans.inertia_\n inertia11.append(inertia_iter11)\n\n\n# In[29]:\n\n\nT12 = range(1,10)\ninertia12 = []\nfor t12 in T12:\n kmeans = cluster.KMeans(n_clusters = t12, init = \"k-means++\")\n kmeans = kmeans.fit(tupel12)\n inertia_iter12 = kmeans.inertia_\n inertia12.append(inertia_iter12)\n\n\n# In[30]:\n\n\nT13 = range(1,10)\ninertia13 = []\nfor t13 in T13:\n kmeans = cluster.KMeans(n_clusters = t13, init = \"k-means++\")\n kmeans = kmeans.fit(tupel13)\n inertia_iter13 = kmeans.inertia_\n inertia13.append(inertia_iter13)\n\n\n# In[31]:\n\n\nT14 = range(1,10)\ninertia14 = []\nfor t14 in T14:\n kmeans = cluster.KMeans(n_clusters = t14, init = \"k-means++\")\n kmeans = kmeans.fit(tupel14)\n inertia_iter14 = kmeans.inertia_\n inertia14.append(inertia_iter14)\n\n\n# In[32]:\n\n\nT15 = range(1,10)\ninertia15 = []\nfor t15 in T15:\n kmeans = cluster.KMeans(n_clusters = t15, init = \"k-means++\")\n kmeans = kmeans.fit(tupel15)\n inertia_iter15 = 
kmeans.inertia_\n inertia15.append(inertia_iter15)\n\n\n# In[33]:\n\n\nT16 = range(1,10)\ninertia16 = []\nfor t16 in T16:\n kmeans = cluster.KMeans(n_clusters = t16, init = \"k-means++\")\n kmeans = kmeans.fit(tupel16)\n inertia_iter16 = kmeans.inertia_\n inertia16.append(inertia_iter16)\n\n\n# In[34]:\n\n\nT17 = range(1,10)\ninertia17 = []\nfor t17 in T17 :\n kmeans = cluster.KMeans(n_clusters = t17, init = \"k-means++\")\n kmeans = kmeans.fit(tupel17)\n inertia_iter17 = kmeans.inertia_\n inertia17.append(inertia_iter17)\n\n\n# In[35]:\n\n\nT18 = range(1,10)\ninertia18 = []\nfor t18 in T18:\n kmeans = cluster.KMeans(n_clusters = t18, init = \"k-means++\")\n kmeans = kmeans.fit(tupel18)\n inertia_iter18 = kmeans.inertia_\n inertia18.append(inertia_iter18)\n\n\n# In[36]:\n\n\nT19 = range(1,10)\ninertia19 = []\nfor t19 in T19:\n kmeans = cluster.KMeans(n_clusters = t19, init = \"k-means++\")\n kmeans = kmeans.fit(tupel19)\n inertia_iter19 = kmeans.inertia_\n inertia19.append(inertia_iter19)\n\n\n# In[37]:\n\n\nT20 = range(1,10)\ninertia20 = []\nfor t20 in T20:\n kmeans = cluster.KMeans(n_clusters = t20, init = \"k-means++\")\n kmeans = kmeans.fit(tupel20)\n inertia_iter20 = kmeans.inertia_\n inertia20.append(inertia_iter20)\n\n\n# In[38]:\n\n\nT21 = range(1,10)\ninertia21 = []\nfor t21 in T21:\n kmeans = cluster.KMeans(n_clusters = t21, init = \"k-means++\")\n kmeans = kmeans.fit(tupel21)\n inertia_iter21 = kmeans.inertia_\n inertia21.append(inertia_iter21)\n\n\n# In[39]:\n\n\nT22 = range(1,10)\ninertia22 = []\nfor t22 in T22:\n kmeans = cluster.KMeans(n_clusters = t22, init = \"k-means++\")\n kmeans = kmeans.fit(tupel22)\n inertia_iter22 = kmeans.inertia_\n inertia22.append(inertia_iter22)\n\n\n# In[40]:\n\n\nT23 = range(1,10)\ninertia23 = []\nfor t23 in T23:\n kmeans = cluster.KMeans(n_clusters = t23, init = \"k-means++\")\n kmeans = kmeans.fit(tupel23)\n inertia_iter23 = kmeans.inertia_\n inertia23.append(inertia_iter23)\n\n\n# In[41]:\n\n\nT25 = range(1,10)\ninertia25 = []\nfor t25 in T25:\n kmeans = cluster.KMeans(n_clusters = t25, init = \"k-means++\")\n kmeans = kmeans.fit(tupel25)\n inertia_iter25 = kmeans.inertia_\n inertia25.append(inertia_iter25)\n\n\n# In[42]:\n\n\nT26 = range(1,10)\ninertia26 = []\nfor t26 in T26:\n kmeans = cluster.KMeans(n_clusters = t26, init = \"k-means++\")\n kmeans = kmeans.fit(tupel26)\n inertia_iter26 = kmeans.inertia_\n inertia26.append(inertia_iter26)\n\n\n# In[43]:\n\n\nT27 = range(1,10)\ninertia27 = []\nfor t27 in T27 :\n kmeans = cluster.KMeans(n_clusters = t27, init = \"k-means++\")\n kmeans = kmeans.fit(tupel27)\n inertia_iter27 = kmeans.inertia_\n inertia27.append(inertia_iter27)\n\n\n# In[44]:\n\n\nT28 = range(1,10)\ninertia28 = []\nfor t28 in T28:\n kmeans = cluster.KMeans(n_clusters = t28, init = \"k-means++\")\n kmeans = kmeans.fit(tupel28)\n inertia_iter28 = kmeans.inertia_\n inertia28.append(inertia_iter28)\n\n\n# In[45]:\n\n\nT29 = range(1,10)\ninertia29 = []\nfor t29 in T29:\n kmeans = cluster.KMeans(n_clusters = t29, init = \"k-means++\")\n kmeans = kmeans.fit(tupel29)\n inertia_iter29 = kmeans.inertia_\n inertia29.append(inertia_iter29)\n\n\n# In[46]:\n\n\nT24 = range(1,10)\ninertia24 = []\nfor t24 in T24:\n kmeans = cluster.KMeans(n_clusters = t24, init = \"k-means++\")\n kmeans = kmeans.fit(tupel24)\n inertia_iter24 = kmeans.inertia_\n inertia24.append(inertia_iter24)\n\n\n# In[47]:\n\n\nT30 = range(1,10)\ninertia30 = []\nfor t30 in T30:\n kmeans = cluster.KMeans(n_clusters = t30, init = \"k-means++\")\n kmeans = kmeans.fit(tupel30)\n 
inertia_iter30 = kmeans.inertia_\n inertia30.append(inertia_iter30)\n\n\n# In[48]:\n\n\nT31 = range(1,10)\ninertia31 = []\nfor t31 in T31:\n kmeans = cluster.KMeans(n_clusters = t31, init = \"k-means++\")\n kmeans = kmeans.fit(tupel31)\n inertia_iter31 = kmeans.inertia_\n inertia31.append(inertia_iter31)\n\n\n# In[ ]:\n\n\nT32 = range(1,10)\ninertia32 = []\nfor t32 in T32:\n kmeans = cluster.KMeans(n_clusters = t32, init = \"k-means++\")\n kmeans = kmeans.fit(tupel32)\n inertia_iter32 = kmeans.inertia_\n inertia32.append(inertia_iter32)\n\n\n# In[64]:\n\n\nT33 = range(1,10)\ninertia33 = []\nfor t33 in T33:\n kmeans = cluster.KMeans(n_clusters = t33, init = \"k-means++\")\n kmeans = kmeans.fit(tupel33)\n inertia_iter33 = kmeans.inertia_\n inertia33.append(inertia_iter33)\n\n\n# In[65]:\n\n\nT34 = range(1,10)\ninertia34 = []\nfor t34 in T34:\n kmeans = cluster.KMeans(n_clusters = t34, init = \"k-means++\")\n kmeans = kmeans.fit(tupel34)\n inertia_iter34 = kmeans.inertia_\n inertia34.append(inertia_iter34)\n\n\n# In[66]:\n\n\nT35 = range(1,10)\ninertia35 = []\nfor t35 in T35:\n kmeans = cluster.KMeans(n_clusters = t35, init = \"k-means++\")\n kmeans = kmeans.fit(tupel35)\n inertia_iter35 = kmeans.inertia_\n inertia35.append(inertia_iter35)\n\n\n# In[67]:\n\n\nT36 = range(1,10)\ninertia36 = []\nfor t36 in T36:\n kmeans = cluster.KMeans(n_clusters = t36, init = \"k-means++\")\n kmeans = kmeans.fit(tupel36)\n inertia_iter36 = kmeans.inertia_\n inertia36.append(inertia_iter36)\n\n\n# In[68]:\n\n\nT37 = range(1,10)\ninertia37 = []\nfor t37 in T37 :\n kmeans = cluster.KMeans(n_clusters = t37, init = \"k-means++\")\n kmeans = kmeans.fit(tupel37)\n inertia_iter37 = kmeans.inertia_\n inertia37.append(inertia_iter37)\n\n\n# In[69]:\n\n\nT38 = range(1,10)\ninertia38 = []\nfor t38 in T38:\n kmeans = cluster.KMeans(n_clusters = t38, init = \"k-means++\")\n kmeans = kmeans.fit(tupel38)\n inertia_iter38 = kmeans.inertia_\n inertia38.append(inertia_iter38)\n\n\n# In[70]:\n\n\nT39 = range(1,10)\ninertia39 = []\nfor t39 in T39:\n kmeans = cluster.KMeans(n_clusters = t39, init = \"k-means++\")\n kmeans = kmeans.fit(tupel39)\n inertia_iter39 = kmeans.inertia_\n inertia39.append(inertia_iter39)\n\n\n# In[71]:\n\n\nT40 = range(1,10)\ninertia40 = []\nfor t40 in T40:\n kmeans = cluster.KMeans(n_clusters = t40, init = \"k-means++\")\n kmeans = kmeans.fit(tupel40)\n inertia_iter40 = kmeans.inertia_\n inertia40.append(inertia_iter40)\n\n\n# In[72]:\n\n\nT41 = range(1,10)\ninertia41 = []\nfor t41 in T41:\n kmeans = cluster.KMeans(n_clusters = t41, init = \"k-means++\")\n kmeans = kmeans.fit(tupel41)\n inertia_iter41 = kmeans.inertia_\n inertia41.append(inertia_iter41)\n\n\n# In[73]:\n\n\nT42 = range(1,10)\ninertia42 = []\nfor t42 in T42:\n kmeans = cluster.KMeans(n_clusters = t42, init = \"k-means++\")\n kmeans = kmeans.fit(tupel42)\n inertia_iter42 = kmeans.inertia_\n inertia42.append(inertia_iter42)\n\n\n# In[74]:\n\n\nT43 = range(1,10)\ninertia43 = []\nfor t43 in T43:\n kmeans = cluster.KMeans(n_clusters = t43, init = \"k-means++\")\n kmeans = kmeans.fit(tupel43)\n inertia_iter43 = kmeans.inertia_\n inertia43.append(inertia_iter43)\n\n\n# In[75]:\n\n\nT44 = range(1,10)\ninertia44 = []\nfor t44 in T44:\n kmeans = cluster.KMeans(n_clusters = t44, init = \"k-means++\")\n kmeans = kmeans.fit(tupel44)\n inertia_iter44 = kmeans.inertia_\n inertia44.append(inertia_iter44)\n\n\n# In[76]:\n\n\nT45 = range(1,10)\ninertia45 = []\nfor t45 in T45:\n kmeans = cluster.KMeans(n_clusters = t45, init = \"k-means++\")\n kmeans = 
kmeans.fit(tupel45)\n inertia_iter45 = kmeans.inertia_\n inertia45.append(inertia_iter45)\n\n\n# In[51]:\n\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T1, inertia1, 'x-')\nplt.plot(T2, inertia2, 'x-')\nplt.plot(T3, inertia3, 'x-')\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"genderVsage\",\"genderVsHypertension\", \"genderVsHeart_diseases\"])\nplt.show()\n\n\n# In[52]:\n\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T4, inertia4, 'x-')\nplt.plot(T5, inertia5, 'x-')\nplt.plot(T6, inertia6, 'x-')\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"genderVsever_married\",\"GenderVsWorkType\", \"genderVsresidence\"])\nplt.show()\n\n\n# In[53]:\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T7, inertia7, 'x-')\nplt.plot(T8, inertia8, 'x-')\nplt.plot(T9, inertia9, 'x-')\n\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"genderVsGlucose\",\"GenderVsBMI\", \"genderVsSmokingStatus\"])\nplt.show()\n\n\n# In[54]:\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T10, inertia10, 'x-')\nplt.plot(T11, inertia11, 'x-')\nplt.plot(T12, inertia12, 'x-')\nplt.plot(T13, inertia13, 'x-')\n\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"ageVshypertension\",\"ageVsheart_disease\", \"ageVsever_married\",\"ageVswork_type\"])\nplt.show()\n\n\n# In[55]:\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T14, inertia14, 'x-')\nplt.plot(T15, inertia15, 'x-')\nplt.plot(T16, inertia16, 'x-')\nplt.plot(T17, inertia17, 'x-')\n\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"ageVsResidence_type\",\"ageVsGlucose\", \"ageVsBmi\",\"ageVsSmoking_status\"])\nplt.show()\n\n\n# In[56]:\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T18, inertia18, 'x-')\nplt.plot(T19, inertia19, 'x-')\nplt.plot(T20, inertia20, 'x-')\n\n\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"hypertensionVsHeart_disease\",\"hypertensionVsEver_married\", \"HypertensionVswork_type\"])\nplt.show()\n\n\n# In[57]:\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T21, inertia21, 'x-')\nplt.plot(T22, inertia22, 'x-')\nplt.plot(T23, inertia23, 'x-')\nplt.plot(T24, inertia24, 'x-')\n\n\n\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"hypertensionVsResidence_type\",\"hypertensionVsavg_glucose_level\", \"hypertensionVsBmi\",\"hypertensionVssmoking_status\"])\nplt.show()\n\n\n# In[58]:\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T25, inertia25, 'x-')\nplt.plot(T26, inertia26, 'x-')\nplt.plot(T27, inertia27, 'x-')\nplt.plot(T28, inertia28, 'x-')\n\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"heart_diseaseVsever_married\",\"heart_diseaseVswork_type\", \"heart_diseaseVsResidence_type\",\"heart_diseaseVsavg_glucose_level\"])\nplt.show()\n\n\n# In[59]:\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T29, inertia29, 'x-')\nplt.plot(T30, inertia30, 'x-')\nplt.plot(T31, inertia31, 'x-')\nplt.plot(T32, inertia32, 'x-')\n\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"heart_diseaseVsBmi\",\"heart_diseaseVsSmoking_status\", 
\"ever_marriedVswork_type\",\"ever_marriedVsResidence_type\"])\nplt.show()\n\n\n# In[60]:\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T33, inertia33, 'x-')\nplt.plot(T34, inertia34, 'x-')\nplt.plot(T35, inertia35, 'x-')\nplt.plot(T36, inertia36, 'x-')\n\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"ever_marriedVsavg_glucose_level\",\"ever_marriedVsbmi\", \"ever_marriedVssmoking_status\",\"work_typeVsResidence_type\"])\nplt.show()\n\n\n# In[61]:\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T37, inertia37, 'x-')\nplt.plot(T38, inertia38, 'x-')\nplt.plot(T39, inertia39, 'x-')\nplt.plot(T40, inertia40, 'x-')\n\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"work_typeVsavg_glucose_level\",\"work_typeVsbmi\", \"work_typeVssmoking_status\",\"Residence_typeVsavg_glucose_level\"])\nplt.show()\n\n\n# In[62]:\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T44, inertia44, 'x-')\nplt.plot(T45, inertia45, 'x-')\n\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"avg_glucose_levelVssmoking_status\",\"bmiVssmoking_status\"])\nplt.show()\n\n\n# In[63]:\n\n\nplt.figure(figsize=(15, 11))\nplt.plot(T41, inertia41, 'x-')\nplt.plot(T42, inertia42, 'x-')\nplt.plot(T43, inertia43, 'x-')\n\n\nplt.xlabel('K')\nplt.ylabel('Inertia values')\nplt.title('This elbow curve shows the optimal K')\nplt.legend([\"Residence_typeVsBmi\", \"Residence_typeVsSmoking_status\", \"avg_glucose_levelVsBmi\"])\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n", "sub_path": "Code 1.py", "file_name": "Code 1.py", "file_ext": "py", "file_size_in_byte": 19401, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 224, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 224, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 236, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 236, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 248, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 248, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 260, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 260, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 272, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 272, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 284, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 284, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 296, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 296, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 308, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 308, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 320, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 320, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 332, 
"usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 332, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 344, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 344, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 356, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 356, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 368, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 368, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 380, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 380, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 392, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 392, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 404, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 404, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 416, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 416, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 428, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 428, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 440, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 440, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 452, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 452, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 464, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 464, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 476, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 476, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 488, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 488, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 500, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 500, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 512, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 512, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 524, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 524, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 536, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 536, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 548, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 548, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 560, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 560, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 572, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 572, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 584, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 584, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 596, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 596, "usage_type": "name"}, 
{"api_name": "sklearn.cluster.KMeans", "line_number": 608, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 608, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 620, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 620, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 632, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 632, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 644, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 644, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 656, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 656, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 668, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 668, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 680, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 680, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 692, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 692, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 704, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 704, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 716, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 716, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 728, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 728, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 740, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 740, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 752, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 752, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 762, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 762, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 763, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 763, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 764, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 764, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 765, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 765, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 766, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 766, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 767, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 767, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 768, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 768, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 769, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 769, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 770, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 770, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 777, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 777, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 778, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 778, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 779, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 779, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 780, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 780, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 781, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 781, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 782, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 782, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 783, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 783, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 784, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 784, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 785, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 785, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 791, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 791, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 792, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 792, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 793, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 793, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 794, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 794, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 796, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 796, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 797, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 797, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 798, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 798, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 799, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 799, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 800, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 800, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 806, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 806, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 807, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 807, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 808, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 808, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 809, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 809, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 810, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 810, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 812, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 812, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 813, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 813, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 814, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 814, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 815, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 815, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 816, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 816, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 822, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 822, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 823, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 823, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 824, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 824, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 825, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 825, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 826, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 826, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 828, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 828, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 829, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 829, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 830, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 830, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 831, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 831, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 832, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 832, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 838, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 838, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 839, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 839, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 840, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 840, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 841, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 841, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 844, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 844, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 845, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 845, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 846, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 846, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 847, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 847, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 848, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 848, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 854, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 854, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 855, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 855, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 856, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 856, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 857, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 857, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 858, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 858, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 862, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 862, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 863, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 863, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 864, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 864, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 865, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 865, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 866, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 866, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 872, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 872, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 873, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 873, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 874, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 874, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 875, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 875, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 876, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 876, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 878, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 878, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 879, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 879, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 880, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 880, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 881, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 881, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 882, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 882, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 888, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 888, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 889, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 889, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 890, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 890, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 891, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 891, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 892, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 892, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 894, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 894, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 895, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 895, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 896, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 896, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 897, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 897, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 898, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 898, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 904, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 904, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 905, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 905, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 906, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 906, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 907, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 907, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 908, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 908, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 910, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 910, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 911, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 911, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 912, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 912, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 913, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 913, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 914, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 914, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 920, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 920, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 921, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 921, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.plot", "line_number": 922, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 922, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 923, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 923, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 924, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 924, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 926, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 926, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 927, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 927, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 928, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 928, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 929, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 929, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 930, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 930, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 936, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 936, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 937, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 937, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 938, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 938, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 940, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 940, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 941, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 941, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 942, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 942, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 943, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 943, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 944, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 944, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 950, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 950, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 951, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 951, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 952, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 952, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 953, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 953, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 956, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 956, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 957, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 957, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", 
"line_number": 958, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 958, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 959, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 959, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 960, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 960, "usage_type": "name"}]} +{"seq_id": "22344518", "text": "import matplotlib.pyplot as plt\nimport pickle\nwith open(\"validation_lists.pkl\", 'rb') as f:\n data = pickle.load(f)\n\narr = data[\"val_resp_mae\"]\nx = range(1, len(arr)+1)\nplt.plot(x, arr)\nplt.xlabel('Epochs')\nplt.ylabel('Value Response MAE')\nplt.title('Value Response MAE over Training')\nplt.show()\n\narr = data[\"val_resp_mse\"]\nplt.plot(x, arr)\nplt.xlabel('Epochs')\nplt.ylabel('Value Response MSE')\nplt.title('Value Response MSE over Training')\nplt.show()\n\narr = data[\"val_resp_r2\"]\nplt.plot(x, arr)\nplt.xlabel('Epochs')\nplt.ylabel('Value Response R2')\nplt.title('Value Response R2 over Training')\nplt.show()\n", "sub_path": "complexmodels/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 608, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pickle.load", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 25, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "155963156", "text": "from rest_framework.urlpatterns import format_suffix_patterns\nfrom django.conf.urls import url\nfrom . import views\n\napp_name = 'lmcweb'\n\nurlpatterns = format_suffix_patterns([\n # url(r'^register/$', views.register, name='register'),\n # url(r'^login_user/$', views.login_user, name='login_user'),\n # url(r'^logout_user/$', views.logout_user, name='logout_user'),\n url(r'^$', views.api_root),\n url(r'^api/posts/$', views.PostList.as_view(), name='post-list'),\n url(r'^api/search_posts/$', views.SearchPosts.as_view(), name='search-post'),\n url(r'^api/post/(?P[0-9]+)/$', views.PostDetail.as_view(), name='post-detail'),\n url(r'^api/popular_posts/$', views.PostPopularList.as_view(), name='popular-list'),\n url(r'^api/latest_posts/$', views.PostLatestList.as_view(), name='latest-list'),\n url(r'^api/related_posts/(?P[0-9]+)/$', views.PostRelatedList.as_view(), name='related-list'),\n url(r'^api/templates/$', views.PosttemplateList.as_view(), name='template-list'),\n url(r'^api/templates/(?P[0-9]+)/$', views.PosttemplateDetail.as_view(), name='template-detail'),\n url(r'^api/tags/$', views.TagList.as_view(), name='tag-list'),\n url(r'^api/tags/(?P[0-9]+)/$', views.TagDetail.as_view(), name='tag-detail'),\n url(r'^api/users/$', views.UserList.as_view(), name='user-list'),\n url(r'^api/users/(?P[0-9]+)/$', views.UserDetail.as_view(), name='user-detail'),\n])\n# urlpatterns = (urlpatterns, allowed=['json', 'html'])\n", "sub_path": "lmcweb/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1481, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "rest_framework.urlpatterns.format_suffix_patterns", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "9161638", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\n\nimport peewee as p\nimport playhouse.migrate as m\nimport playhouse.signals as ps\nfrom playhouse.reflection import Introspector\nfrom playhouse.shortcuts import dict_to_model\nfrom playhouse.shortcuts import model_to_dict\n\nfrom .compat import iteritems\nfrom .compat import reraise\nfrom .compat import text_type\nfrom .inflect import underscore\nfrom .utils import 
progressbar\n\ndb_table_name = underscore\n\n\nclass Manager(object):\n \"\"\"Use the manager to manage connections to different databases.\n\n The class provides a `proxy` that can be used in your models. When calling\n `using` on the manager, all models are automatically switched to\n the given database. This allows for a _multiple database_-style application\n where different customers have different databases, but share the schema.\n\n This is also very handy for switching between _testing_ and _production_\n databases without lots of configuration.\n \"\"\"\n\n def __init__(self, database_class, **options):\n self.database_class = database_class\n\n self.options = options\n self.pool = dict()\n self.models = list()\n\n def using(self, database):\n connection = self.connection(database)\n return p.Using(connection, self.models)\n\n def connection(self, database):\n if database not in self.pool:\n self.pool[database] = self.database_class(database=database,\n **self.options)\n return self.pool[database]\n\n def proxy(self):\n return DatabaseProxy(self)\n\n def analyse(self, database):\n return Analyser(self, database)\n\n\nclass DatabaseProxy(p.Proxy):\n \"\"\"\n Proxy class useful for situations when you wish to defer the initialization\n of an object.\n \"\"\"\n __slots__ = ['obj', '_callbacks', 'manager']\n\n def __init__(self, manager):\n super(DatabaseProxy, self).__init__()\n self.manager = manager\n\n def initialize(self, obj):\n if obj is not None:\n raise RuntimeError('Please use `manager.using(database)`.')\n super(DatabaseProxy, self).initialize(obj)\n\n def register(self, model):\n self.manager.models.append(model)\n\n\nclass BaseModel(p.BaseModel):\n def __new__(cls, name, bases, attrs):\n cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)\n if not hasattr(cls, '_meta'):\n return cls\n\n # overwrite db_table name with our underscore variant\n cls._meta.db_table = db_table_name(name)\n\n if len(cls._meta.fields.values()) > 1 and \\\n isinstance(cls._meta.database, DatabaseProxy):\n cls._meta.database.register(cls)\n\n return cls\n\n\nclass Model(p.with_metaclass(BaseModel, p.Model), p.Model):\n def __init__(self, *args, **kwargs):\n super(Model, self).__init__(*args, **kwargs)\n ps.pre_init.send(self)\n\n def prepared(self):\n result = super(Model, self).prepared()\n ps.post_init.send(self)\n\n return result\n\n def save(self, force_insert=False, only=None, skip_triggers=False):\n pk_value = self._get_pk_value()\n created = force_insert or not bool(pk_value)\n\n if not skip_triggers:\n ps.pre_save.send(self, created=created)\n\n result = super(Model, self).save(force_insert=force_insert,\n only=only)\n\n if not skip_triggers:\n ps.post_save.send(self, created=created, result=result)\n\n return result\n\n def delete_instance(self, recursive=False, delete_nullable=False,\n skip_triggers=False):\n if not skip_triggers:\n ps.pre_delete.send(self)\n\n result = super(Model, self).delete_instance(\n recursive=recursive, delete_nullable=delete_nullable)\n\n if not skip_triggers:\n ps.post_delete.send(self, result=result)\n\n return result\n\n @classmethod\n def get_or_none(cls, *args):\n \"\"\"Get a resource, return None if it's not found.\n\n **Parameters**\n\n :param \\*args: List of query expressions.\n :rtype: Model or None\n \"\"\"\n try:\n return super(Model, cls).get(*args)\n except p.DoesNotExist:\n return None\n\n @classmethod\n def from_dict(cls, data, ignore_unknown=True):\n \"\"\"Convert a dict to a model.\n\n **Parameters**\n\n :param data: A dictionary of 
data.\n :param ignore_unknown: Ignore unknown fields.\n :type data: dict\n :type ignore_unknown: bool\n :rtype: Model\n\n \"\"\"\n return dict_to_model(cls, data, ignore_unknown)\n\n def to_dict(self, recurse=False, backrefs=False, only=None,\n exclude=None, seen=None):\n \"\"\"\n Convert a model instance (and any related objects) to a dictionary.\n\n **Parameters**\n\n :param recurse: Whether foreign-keys should be recursed.\n Defaults to `False`.\n :param backrefs: Whether lists of related objects should be\n recursed.\n Defaults to `False`.\n :param only: A list (or set) of field instances indicating which\n fields should be included.\n Defaults to `None`.\n :param exclude: A list (or set) of field instances that should be\n excluded from the dictionary.\n Defaults to `None`.\n :param seen: Internally used.\n Defaults to `None`.\n :type recurse: bool\n :type backrefs: bool\n :type only: list\n :type exclude: list\n \"\"\"\n return model_to_dict(self, recurse, backrefs, only, exclude, seen)\n\n def update_with(self, data, exclude=None):\n \"\"\"Update the model with a dictionary.\n\n **Parameters**\n\n :param data: A dictionary of data; unknown fields are skipped.\n :param exclude: A list of keys that are to be excluded.\n :type data: dict\n :type exclude: list\n :rtype: Model\n\n \"\"\"\n for key, value in iteritems(data):\n if exclude and key in exclude:\n continue\n\n setattr(self, key, value)\n return self\n\n def save_dirty(self):\n \"\"\"Only save the fields that are *dirty*.\"\"\"\n return self.save(only=self.dirty_fields)\n\n @classmethod\n def meta_database(cls):\n \"\"\"Retrieve the active database from the model.\"\"\"\n return cls._meta.database\n\n @classmethod\n def meta_name(cls):\n \"\"\"Retrieve the database table name of the model.\n\n :rtype: str\n \"\"\"\n return cls._meta.db_table\n\n @classmethod\n def meta_fields(cls):\n \"\"\"Retrieve all the fields of the model.\n\n :rtype: dict\n \"\"\"\n return cls._meta.fields\n\n @classmethod\n def meta_field(cls, name):\n \"\"\"Retrieve a field by name.\n\n :rtype: peewee.Field\n \"\"\"\n return cls._meta.fields.get(name)\n\n\nclass Analyser(object):\n def __init__(self, manager, database):\n self.manager = manager\n self.database = database\n self.statements = list()\n\n connection = self.manager.connection(self.database)\n self.connection = connection\n\n self.local = None\n self.online = None\n\n self.order_of_models = list()\n self.local_models = dict()\n self.online_models = dict()\n\n def local_models_in_order(self):\n for db_table in self.order_of_models:\n yield db_table, self.local_models[db_table]\n\n def scan(self):\n self.statements = list()\n\n models = p.sort_models_topologically(self.manager.models)\n self.order_of_models = [m._meta.db_table for m in models]\n self.local_models = {m._meta.db_table: m for m in models}\n\n with self.manager.using(self.database):\n self.local = Topology(self.connection, self.local_models)\n\n introspector = Introspector.from_database(self.connection)\n self.online_models = introspector.generate_models()\n self.online = Topology(self.connection, self.online_models)\n\n # first missing tables to be created\n for db_table in self.order_of_models:\n if db_table not in self.online.models:\n local_model = self.local.models[db_table]\n self.state('create_table', local_model['instance'])\n\n # second missing tables to be dropped\n for db_table, online_model in iteritems(self.online.models):\n if db_table not in self.local.models:\n self.state('drop_table', online_model['instance'])\n\n # 
third scan fields to be created, dropped or mutated\n for db_table, online_model in iteritems(self.online.models):\n if db_table not in self.local.models:\n continue\n\n local_model = self.local.models[db_table]\n\n online_instance = online_model['instance']\n local_instance = local_model['instance']\n\n online_fields = online_model['fields']\n local_fields = local_model['fields']\n\n online_indexes = online_model['indexes']\n local_indexes = local_model['indexes']\n\n # scan indexes to be dropped\n for online_index in online_indexes:\n found = any(l == online_index for l in local_indexes)\n if not found:\n self.state('drop_index', online_instance, online_index)\n\n # fields to be dropped\n for field_name, online_field in iteritems(online_fields):\n if field_name not in local_fields:\n self.state('drop_column', local_instance, online_field)\n\n # fields to be added\n for field_name, local_field in iteritems(local_fields):\n if field_name not in online_fields:\n self.state('add_column', local_instance, local_field)\n\n # fields to be mutated\n for field_name, local_field in iteritems(local_fields):\n if field_name not in online_fields:\n continue\n\n online_field = online_fields[field_name]\n\n if local_field == online_field:\n continue\n\n if local_field.test_modifiers_changed(online_field):\n pass\n # peewee currently does not support reflection based on\n # the modifier; when changed it always triggers this\n # \"changed\" element.\n elif local_field.test_null_changed(online_field):\n if online_field.field.null:\n self.state('add_not_null', local_instance, local_field)\n else:\n self.state('drop_not_null', local_instance,\n local_field)\n else:\n skip = False\n\n if local_field.sql != online_field.sql:\n try:\n from playhouse.postgres_ext import ArrayField\n if isinstance(local_field, ArrayField):\n skip = True\n except ImportError:\n pass\n\n if skip:\n self.state('drop_column', online_instance,\n online_field)\n self.state('add_column', local_instance,\n local_field)\n\n # scan indexes to be created\n for local_index in local_indexes:\n found = any(l == local_index for l in online_indexes)\n if not found:\n self.state('add_index', local_instance, local_index)\n\n def state(self, name, model, item=None):\n self.statements.append([name, model, item])\n\n @property\n def migrator(self): # pragma: no cover\n if isinstance(self.connection, p.PostgresqlDatabase):\n return MigratorWrapper(m.PostgresqlMigrator(self.connection))\n elif isinstance(self.connection, p.MySQLDatabase):\n return MigratorWrapper(m.MySQLMigrator(self.connection))\n elif isinstance(self.connection, p.SqliteDatabase):\n return MigratorWrapper(m.SqliteMigrator(self.connection))\n else:\n raise RuntimeError('Unknown database type, cannot find migrator.')\n\n def auto_migrate(self, show_progressbar=True):\n if not len(self.statements):\n return False\n\n with self.manager.using(self.database):\n migrator = self.migrator\n\n bar = progressbar(self.statements, label='Migrating',\n show_progressbar=show_progressbar)\n\n for statement in bar:\n migrator.run(statement)\n\n return True\n\n def print_migrations(self, out=None):\n if out is None: # pragma: no cover\n from sys import stdout\n out = stdout\n\n write = out.write\n\n tpl = '{table:s} = DroppableTable(\"{table:s}\")\\n'\n drop_tables = [r for r in self.statements if r[0] == 'drop_table']\n\n if len(drop_tables):\n for name, table, item in drop_tables:\n write(tpl.format(table=table._meta.db_table))\n write('\\n')\n\n for name, table, item in self.statements:\n db_table = 
table._meta.db_table\n\n write('migrator.{name:s}('.format(name=name))\n\n if name == 'create_table':\n write('m.' + table.__name__)\n elif name == 'drop_table':\n write(db_table)\n elif name == 'drop_column':\n write('m.' + table.__name__ + ', ')\n write('p.Field(db_column=\"' + item.field.db_column + '\")')\n elif name == 'add_column':\n write('m.' + table.__name__ + ', ')\n write('m.' + table.__name__ + '.' + item.field.name)\n elif name == 'drop_not_null':\n write('m.' + table.__name__ + ', ')\n write('m.' + table.__name__ + '.' + item.field.name)\n elif name == 'add_not_null':\n write('m.' + table.__name__ + ', ')\n write('m.' + table.__name__ + '.' + item.field.name)\n elif name == 'add_index':\n write('m.' + table.__name__ + ', ')\n write('[')\n for field_name in item.field_names:\n write('\"' + field_name + '\", ')\n write('], ')\n write('True' if item.unique else 'False')\n elif name == 'drop_index':\n write('m.' + table.__name__ + ', ')\n write('[')\n for field_name in item.field_names:\n write('\"' + field_name + '\", ')\n write(']')\n else: # pragma: no cover\n raise ValueError('Unknown migrator.')\n\n write(')\\n')\n\n\nclass MigratorWrapper(object):\n def __init__(self, migrator):\n self.migrator = migrator\n\n def run(self, statement):\n name, table, item = statement\n\n func = getattr(self, name)\n try:\n if name.endswith('_table'):\n func(table)\n elif name == 'add_index':\n func(table, item.field_names, item.unique)\n elif name == 'drop_index':\n func(table, item.field_names)\n else:\n func(table, item.field)\n except Exception as exc:\n msg = 'Error on `{:s}` with `{:s}`.`{:s}`.'.format(\n name, table._meta.db_table, str(item))\n\n msg += '\\nOriginalError: ' + text_type(exc)\n msg += '\\nStatement: ' + repr(statement)\n\n exc_type = type(exc)\n tb = sys.exc_info()[2]\n\n reraise(exc_type(msg), tb)\n\n def create_table(self, table):\n table.create_table(fail_silently=False)\n\n def drop_table(self, table):\n cascade = not isinstance(self.migrator, m.SqliteMigrator)\n self.migrator.database.drop_table(table, fail_silently=False,\n cascade=cascade)\n\n def drop_column(self, table, field):\n cascade = not isinstance(self.migrator, m.SqliteMigrator)\n self.migrator.drop_column(\n table._meta.db_table, field.db_column, cascade=cascade).run()\n\n def add_column(self, table, field):\n self.migrator.add_column(\n table._meta.db_table, field.db_column, field).run()\n\n def drop_not_null(self, table, field):\n self.migrator.drop_not_null(\n table._meta.db_table, field.db_column).run()\n\n def add_not_null(self, table, field):\n self.migrator.add_not_null(\n table._meta.db_table, field.db_column).run()\n\n def drop_index(self, table, fields):\n index_name = get_index_name(table, fields)\n\n self.migrator.drop_index(\n table._meta.db_table, index_name).run()\n\n def add_index(self, table, fields, unique):\n self.migrator.add_index(\n table._meta.db_table, fields, unique=unique).run()\n\n\nclass Topology(object):\n def __init__(self, connection, models):\n compiler = connection.compiler()\n\n self.models = dict()\n for db_table, model in iteritems(models):\n self.models[db_table] = {\n 'instance': model,\n 'fields': dict(),\n 'indexes': list(),\n }\n\n fields = self.models[db_table]['fields']\n indexes = self.models[db_table]['indexes']\n\n for n, f in iteritems(model._meta.fields):\n fields[n] = ComparableField(compiler, f)\n\n for index_field in model._fields_to_index():\n indexes.append(ComparableIndex(\n [index_field], index_field.unique))\n\n for index_fields, unique in 
model._meta.indexes:\n try:\n indexes.append(ComparableIndex(\n [model._meta.fields[n] for n in index_fields], unique))\n except Exception as exc:\n raise Exception(\n 'Cannot contract index %s -> %s.\\n%s: %s' % (\n db_table, str(index_fields),\n exc.__class__.__name__, str(exc)))\n\n\nclass ComparableField(object):\n def __init__(self, compiler, field):\n \"\"\"\n :type field: peewee.Field\n \"\"\"\n self.compiler = compiler\n self.field = field\n self.db_name = field.db_column\n\n self.sql, self.params = self.get_definition()\n\n def __eq__(self, other):\n return repr(self) == repr(other)\n\n def __str__(self):\n return self.db_name\n\n def __repr__(self):\n return '<{:s} ({:s} [{:s}])>'.format(\n self.db_name, self.sql,\n ', '.join(str(v) for v in self.params))\n\n def get_definition(self, field=None):\n if field is None:\n field = self.field\n\n return self.compiler.parse_node(self.compiler.field_definition(field))\n\n def test_null_changed(self, other):\n clone = self.field.clone_base()\n\n if hasattr(clone, 'max_length') and hasattr(other.field, 'max_length'):\n clone.max_length = other.field.max_length\n\n clone.null = not clone.null\n\n return self.get_definition(clone) == other.get_definition()\n\n def test_modifiers_changed(self, other):\n clone = self.field.clone_base()\n\n clone_modifiers = '-'.join(str(m) for m\n in clone.get_modifiers() or list())\n other_modifiers = '-'.join(str(m) for m\n in other.field.get_modifiers() or list())\n\n if clone_modifiers == other_modifiers:\n return False\n\n for attr in ['max_length', 'max_digits', 'decimal_places']:\n setattr(clone, attr, getattr(other.field, attr, None))\n\n return self.get_definition(clone) == other.get_definition()\n\n\nclass ComparableIndex(object):\n def __init__(self, fields, unique):\n self.fields = fields\n self.field_names = [f.db_column for f in fields]\n self.unique = unique\n\n def __str__(self):\n return get_index_name(self.fields[0].model_class, self.field_names)\n\n def __eq__(self, other):\n return repr(self) == repr(other)\n\n def __repr__(self):\n # TODO: Report bug in sorted index names - peewee.py @3765\n if self.unique:\n return '<{:s} UNIQUE>'.format(', '.join(\n sorted(self.field_names)))\n else:\n return '<{:s}>'.format(', '.join(\n sorted(self.field_names)))\n\n\ndef get_index_name(table, fields):\n # TODO: name of index may be incomplete\n return (table._meta.db_table + '_' + '_'.join(fields))[0:63]\n\n\nclass DroppableTable(object):\n def __init__(self, name):\n self.name = name\n\n def as_entity(self):\n return p.Entity(self.name)\n", "sub_path": "src/pandora/database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 21037, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "inflect.underscore", "line_number": 19, "usage_type": "name"}, {"api_name": "peewee.Using", "line_number": 43, "usage_type": "call"}, {"api_name": "peewee.Proxy", "line_number": 58, "usage_type": "attribute"}, {"api_name": "peewee.BaseModel", "line_number": 78, "usage_type": "attribute"}, {"api_name": "peewee.with_metaclass", "line_number": 94, "usage_type": "call"}, {"api_name": "peewee.Model", "line_number": 94, "usage_type": "attribute"}, {"api_name": "playhouse.signals.pre_init.send", "line_number": 97, "usage_type": "call"}, {"api_name": "playhouse.signals.pre_init", "line_number": 97, "usage_type": "attribute"}, {"api_name": "playhouse.signals", "line_number": 97, "usage_type": "name"}, {"api_name": 
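The pandora/database.py record above records schema differences as statements and replays them through playhouse.migrate migrators. A minimal stand-alone sketch of the underlying peewee migration API it dispatches to; the 'product' table and 'price' column are invented here for illustration:

import peewee as p
from playhouse.migrate import SqliteMigrator, migrate

db = p.SqliteDatabase('app.db')
migrator = SqliteMigrator(db)

# Field to add; playhouse.migrate takes table/column names plus a field
# instance, just like the MigratorWrapper.add_column call above.
price = p.DecimalField(null=True)

# migrate() runs each operation in order, mirroring how auto_migrate()
# replays the recorded statements one by one.
migrate(
    migrator.add_column('product', 'price', price),
    migrator.add_index('product', ('name',), unique=True),
)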
"playhouse.signals.post_init.send", "line_number": 101, "usage_type": "call"}, {"api_name": "playhouse.signals.post_init", "line_number": 101, "usage_type": "attribute"}, {"api_name": "playhouse.signals", "line_number": 101, "usage_type": "name"}, {"api_name": "playhouse.signals.pre_save.send", "line_number": 110, "usage_type": "call"}, {"api_name": "playhouse.signals.pre_save", "line_number": 110, "usage_type": "attribute"}, {"api_name": "playhouse.signals", "line_number": 110, "usage_type": "name"}, {"api_name": "playhouse.signals.post_save.send", "line_number": 116, "usage_type": "call"}, {"api_name": "playhouse.signals.post_save", "line_number": 116, "usage_type": "attribute"}, {"api_name": "playhouse.signals", "line_number": 116, "usage_type": "name"}, {"api_name": "playhouse.signals.pre_delete.send", "line_number": 123, "usage_type": "call"}, {"api_name": "playhouse.signals.pre_delete", "line_number": 123, "usage_type": "attribute"}, {"api_name": "playhouse.signals", "line_number": 123, "usage_type": "name"}, {"api_name": "playhouse.signals.post_delete.send", "line_number": 129, "usage_type": "call"}, {"api_name": "playhouse.signals.post_delete", "line_number": 129, "usage_type": "attribute"}, {"api_name": "playhouse.signals", "line_number": 129, "usage_type": "name"}, {"api_name": "peewee.DoesNotExist", "line_number": 144, "usage_type": "attribute"}, {"api_name": "playhouse.shortcuts.dict_to_model", "line_number": 160, "usage_type": "call"}, {"api_name": "playhouse.shortcuts.model_to_dict", "line_number": 187, "usage_type": "call"}, {"api_name": "compat.iteritems", "line_number": 201, "usage_type": "call"}, {"api_name": "peewee.sort_models_topologically", "line_number": 265, "usage_type": "call"}, {"api_name": "playhouse.migrate._meta", "line_number": 266, "usage_type": "attribute"}, {"api_name": "playhouse.migrate", "line_number": 266, "usage_type": "name"}, {"api_name": "playhouse.migrate._meta", "line_number": 267, "usage_type": "attribute"}, {"api_name": "playhouse.migrate", "line_number": 267, "usage_type": "name"}, {"api_name": "playhouse.reflection.Introspector.from_database", "line_number": 272, "usage_type": "call"}, {"api_name": "playhouse.reflection.Introspector", "line_number": 272, "usage_type": "name"}, {"api_name": "compat.iteritems", "line_number": 283, "usage_type": "call"}, {"api_name": "compat.iteritems", "line_number": 288, "usage_type": "call"}, {"api_name": "compat.iteritems", "line_number": 310, "usage_type": "call"}, {"api_name": "compat.iteritems", "line_number": 315, "usage_type": "call"}, {"api_name": "compat.iteritems", "line_number": 320, "usage_type": "call"}, {"api_name": "playhouse.postgres_ext.ArrayField", "line_number": 346, "usage_type": "name"}, {"api_name": "peewee.PostgresqlDatabase", "line_number": 368, "usage_type": "attribute"}, {"api_name": "playhouse.migrate.PostgresqlMigrator", "line_number": 369, "usage_type": "call"}, {"api_name": "playhouse.migrate", "line_number": 369, "usage_type": "name"}, {"api_name": "peewee.MySQLDatabase", "line_number": 370, "usage_type": "attribute"}, {"api_name": "playhouse.migrate.MySQLMigrator", "line_number": 371, "usage_type": "call"}, {"api_name": "playhouse.migrate", "line_number": 371, "usage_type": "name"}, {"api_name": "peewee.SqliteDatabase", "line_number": 372, "usage_type": "attribute"}, {"api_name": "playhouse.migrate.SqliteMigrator", "line_number": 373, "usage_type": "call"}, {"api_name": "playhouse.migrate", "line_number": 373, "usage_type": "name"}, {"api_name": "utils.progressbar", 
"line_number": 384, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 395, "usage_type": "name"}, {"api_name": "compat.text_type", "line_number": 468, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 472, "usage_type": "call"}, {"api_name": "compat.reraise", "line_number": 474, "usage_type": "call"}, {"api_name": "playhouse.migrate.SqliteMigrator", "line_number": 480, "usage_type": "attribute"}, {"api_name": "playhouse.migrate", "line_number": 480, "usage_type": "name"}, {"api_name": "playhouse.migrate.SqliteMigrator", "line_number": 485, "usage_type": "attribute"}, {"api_name": "playhouse.migrate", "line_number": 485, "usage_type": "name"}, {"api_name": "compat.iteritems", "line_number": 517, "usage_type": "call"}, {"api_name": "compat.iteritems", "line_number": 527, "usage_type": "call"}, {"api_name": "playhouse.migrate", "line_number": 586, "usage_type": "argument"}, {"api_name": "playhouse.migrate", "line_number": 588, "usage_type": "argument"}, {"api_name": "peewee.Entity", "line_number": 632, "usage_type": "call"}]} +{"seq_id": "272952924", "text": "from flask import Flask, request\r\nfrom flask import render_template\r\nfrom flask import jsonify\r\n\r\nimport requests\r\n# DB\r\nimport pymysql\r\n\r\nimport json\r\nimport os\r\n\r\n# GUID\r\nimport uuid \r\n\r\n# Cors\r\nfrom flask_cors import CORS\r\n\r\napp=Flask(__name__,template_folder='templates')\r\ncors = CORS(app)\r\n\r\n@app.route('/')\r\ndef home():\r\n return 'Api Rest External'\r\n\r\n@app.route('/Token/Genered', methods=['POST'])\r\ndef TokenGenered():\r\n\r\n Environment = request.form.get(\"Environment\")\r\n \r\n \r\n #Environment = 0\r\n grant_type = \"client_credentials\"\r\n client_id = \"F4DAB8A1-774D-4957-8497-FD4D73361E32\"\r\n client_secret = \"g44bIeDH/YRjeM7IpkOwyfjr8kRUOVUxE/h3swR6RCCs2SPP3eDq4VVXo124YIH3084+nJvAG4SmMVcOxx7JYA==\"\r\n \r\n if (Environment == 1):\r\n Url = \"https://api.vsblty.net/\"\r\n else:\r\n Url = \"https://vsblty-apiv2-qa.azurewebsites.net/\"\r\n\r\n result = [grant_type, client_id, client_secret, Url]\r\n\r\n grant_type = result[0]\r\n client_id = result[1]\r\n client_secret = result[2]\r\n Environment_Url = result[3] + \"/token\"\r\n \r\n\r\n #Realizamos peticion Http\r\n pload = {'grant_type':grant_type,'client_id':client_id,'client_secret':client_secret}\r\n r = requests.post(Environment_Url, data = pload)\r\n\r\n #encoded respuesta\r\n data_string = json.dumps(r.json())\r\n\r\n #Decoded respuesta\r\n decoded = json.loads(data_string)\r\n\r\n # capturamos Variables\r\n try:\r\n Token = str(decoded[\"access_token\"])\r\n Generado = str(decoded[\".issued\"])\r\n Expira = str(decoded[\".expires\"])\r\n Message = \"Token generated correctly... 
(Expires in 1 Hour)\"\r\n error = \"\"\r\n except:\r\n error = str(decoded[\"error\"])\r\n\r\n if len(error) > 0:\r\n return jsonify([{'Message': error}])\r\n else:\r\n return jsonify([{'Environment': Url, 'Message': Message, 'Token': Token, 'Generated': Generado, 'Expires': Expira}])\r\n\r\n@app.route('/Token/')\r\ndef TokenData(UserId):\r\n #print (EndpointId)\r\n connection = pymysql.connect(host='192.168.100.51',\r\n user='Qatest',\r\n password='Quito.2019',\r\n db='External-Api',\r\n charset='utf8mb4',\r\n cursorclass=pymysql.cursors.DictCursor)\r\n\r\n try:\r\n with connection.cursor() as cursor:\r\n # Read a single record\r\n sql = \"SELECT `grant_type`, `client_id`, `client_secret`, `Environment` FROM `User` WHERE `Id`=%s\"\r\n cursor.execute(sql, (UserId))\r\n result = cursor.fetchone()\r\n\r\n\r\n\r\n grant_type = str(result.get('grant_type'))\r\n client_id = str(result.get('client_id'))\r\n client_secret = str(result.get('client_secret'))\r\n Environment = str(result.get('Environment'))\r\n\r\n result = [grant_type, client_id, client_secret, Environment]\r\n\r\n grant_type = result[0]\r\n client_id = result[1]\r\n client_secret = result[2]\r\n Environment_Url = result[3] + \"/token\"\r\n \r\n\r\n #Realizamos peticion Http\r\n pload = {'grant_type':grant_type,'client_id':client_id,'client_secret':client_secret}\r\n r = requests.post(Environment_Url, data = pload)\r\n\r\n #encoded respuesta\r\n data_string = json.dumps(r.json())\r\n\r\n #Decoded respuesta\r\n decoded = json.loads(data_string)\r\n\r\n # capturamos Variables\r\n Token = str(decoded[\"access_token\"])\r\n Generado = str(decoded[\".issued\"])\r\n Expira = str(decoded[\".expires\"])\r\n\r\n\r\n # Actualizar todos los registos del Usuario\r\n sql_update_query = \"\"\"UPDATE Token set IsActive = %s where User_Id = %s\"\"\"\r\n data_tuple = (0, UserId)\r\n\r\n cursor.execute(sql_update_query, data_tuple)\r\n connection.commit()\r\n\r\n # Insertar \\ \r\n sql = \"INSERT INTO `Token` (`User_Id`, `Token`, `Toke_Generated`, `Token_Expiration`) VALUES (%s, %s, %s, %s)\"\r\n cursor.execute(sql, (UserId, Token, Generado, Expira))\r\n \r\n # connection is not autocommit by default. 
So you must commit to save\r\n # your changes.\r\n connection.commit()\r\n\r\n return jsonify(\"Se ha Generado Token Correctamente..\")\r\n\r\n finally:\r\n connection.close()\r\n\r\n # Agregar Token Funcion AddToken\r\n\r\nif __name__ == '__main__':\r\n app.run(host='192.168.100.233', port=5080, debug=True)\r\n #app.run(host='192.168.100.51', port=5080, debug=True)", "sub_path": "Backend/Python/Token.py", "file_name": "Token.py", "file_ext": "py", "file_size_in_byte": 4501, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.Flask", "line_number": 18, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 54, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 72, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 77, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 82, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 108, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 111, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "315946173", "text": "import pymysql\r\nimport config\r\n\r\ndb=cursor=None\r\n\r\nclass UploadFile:\r\n\tdef __init__(self, id_file=None, id_jumantik=None, nama_file=None, tgl_file=None):\r\n\t\tself.id_file=id_file\r\n\t\tself.id_jumantik = id_jumantik\r\n\t\tself.nama_file= nama_file\r\n\t\tself.tgl_file=tgl_file\r\n\r\n\tdef openDB(self):\r\n\t\tglobal db, cursor\r\n\t\tdb = pymysql.connect(\r\n\t\t\t\tconfig.DB_HOST,\r\n\t\t\t\tconfig.DB_USER,\r\n\t\t\t\tconfig.DB_PASSWORD,\r\n\t\t\t\tconfig.DB_NAME)\r\n\t\tcursor = db.cursor()\r\n\r\n\tdef closeDB(self):\r\n\t\tglobal db, cursor\r\n\t\tdb.close()\r\n\r\n\tdef selectDB(self):\r\n\t\tself.openDB()\r\n\t\tcursor.execute(\"SELECT * FROM uploadfile\")\r\n\t\tcontainer = []\r\n\t\tfor id_file,id_jumantik, nama_file,tgl_file in cursor.fetchall():\r\n\t\t\tcontainer.append((id_file,id_jumantik, nama_file,tgl_file))\r\n\t\tself.closeDB()\r\n\t\treturn container\r\n\r\n\tdef insertDB(self, data):\r\n\t\tself.openDB()\r\n\t\tcursor.execute(\"INSERT INTO uploadfile (id_jumantik, nama_file, tgl_file) VALUES ('%s','%s', '%s')\" % data)\r\n\t\tdb.commit()\r\n\t\tself.closeDB()\r\n\r\n\tdef getDBbyNo(self, id_file):\r\n\t\tself.openDB()\r\n\t\tcursor.execute(\"SELECT * FROM uploadfile WHERE id_file='%s'\" % id_file)\r\n\t\tdata = cursor.fetchone()\r\n\t\treturn data", "sub_path": "models_uploadfile.py", "file_name": "models_uploadfile.py", "file_ext": "py", "file_size_in_byte": 1140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pymysql.connect", "line_number": 15, "usage_type": "call"}, {"api_name": "config.DB_HOST", "line_number": 16, "usage_type": "attribute"}, {"api_name": "config.DB_USER", "line_number": 17, "usage_type": 
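The models_uploadfile.py record above interpolates values into SQL with the % operator (insertDB, getDBbyNo), which is open to SQL injection; note that Token.py's cursor.execute calls already pass values separately. A hedged sketch of the parameterized form for the same uploadfile table, with hypothetical helper names:

import pymysql

def insert_upload(connection, data):
    # data is an (id_jumantik, nama_file, tgl_file) tuple, as in insertDB
    with connection.cursor() as cursor:
        cursor.execute(
            "INSERT INTO uploadfile (id_jumantik, nama_file, tgl_file) "
            "VALUES (%s, %s, %s)",
            data)  # pymysql escapes each value itself
    connection.commit()

def get_by_id(connection, id_file):
    with connection.cursor() as cursor:
        # the (id_file,) tuple keeps the value out of the SQL string entirely
        cursor.execute("SELECT * FROM uploadfile WHERE id_file=%s", (id_file,))
        return cursor.fetchone()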
"attribute"}, {"api_name": "config.DB_PASSWORD", "line_number": 18, "usage_type": "attribute"}, {"api_name": "config.DB_NAME", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "295683920", "text": "from nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.fn as fn\nimport nvidia.dali.types as types\nfrom nvidia.dali.plugin.pytorch import DALIClassificationIterator\n\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom random import shuffle\nfrom random import randint\n\n\nclass ExternalInputIterator(object):\n def __init__(self, dataset, batch_size, pad_idx, training=True):\n self.dataset = dataset\n self.batch_size = batch_size\n self.pad_idx = pad_idx\n self.training = training\n if self.training: shuffle(self.dataset.ids)\n\n def __iter__(self):\n self.idx = 0\n if self.training: shuffle(self.dataset.ids)\n return self\n\n def __next__(self):\n img_batch = []\n cap_batch = []\n\n if self.idx >= len(self.dataset):\n self.__iter__()\n raise StopIteration\n\n for _ in range(self.batch_size):\n img, caps = self.dataset[self.idx]\n img_batch.append(img)\n cap = caps[randint(0,len(caps)-1) if self.training else 0]\n cap_batch.append(cap)\n self.idx += 1\n cap_batch = pad_sequence(cap_batch, batch_first=True, padding_value=self.pad_idx)#.type(torch.long)\n return (img_batch, cap_batch)\n\n def __len__(self):\n return len(self.dataset)\n\n next = __next__\n\n\ndef ExternalSourcePipeline(batch_size, num_threads, device_id, external_data, input_size, training=True):\n pipe = Pipeline(batch_size, num_threads, device_id)\n with pipe:\n images, labels = fn.external_source(source=external_data, num_outputs=2)\n if training:\n images = fn.decoders.image_random_crop(images, device='mixed', output_type=types.RGB, num_attempts=100)\n mirror = fn.random.coin_flip(probability=0.5)\n else:\n images = fn.decoders.image(images, device='mixed', output_type=types.RGB)\n mirror = False\n images = fn.resize(images, device='gpu', resize_shorter=input_size, interp_type=types.INTERP_TRIANGULAR)\n images = fn.crop_mirror_normalize(images.gpu(),\n dtype=types.FLOAT,\n output_layout=\"CHW\",\n crop=(input_size, input_size),\n mean=[0.485 * 255,0.456 * 255,0.406 * 255],\n std=[0.229 * 255,0.224 * 255,0.225 * 255],\n mirror=mirror)\n labels = labels.gpu()\n pipe.set_outputs(images, labels)\n return pipe", "sub_path": "imcap/dataloader.py", "file_name": "dataloader.py", "file_ext": "py", "file_size_in_byte": 2594, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "random.shuffle", "line_number": 18, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 22, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 39, "usage_type": "call"}, {"api_name": "nvidia.dali.pipeline.Pipeline", "line_number": 49, "usage_type": "call"}, {"api_name": "nvidia.dali.fn.external_source", "line_number": 51, "usage_type": "call"}, {"api_name": "nvidia.dali.fn", "line_number": 51, "usage_type": "name"}, {"api_name": "nvidia.dali.fn.decoders.image_random_crop", "line_number": 53, "usage_type": "call"}, {"api_name": "nvidia.dali.fn.decoders", "line_number": 53, "usage_type": "attribute"}, {"api_name": "nvidia.dali.fn", "line_number": 53, "usage_type": "name"}, {"api_name": "nvidia.dali.types.RGB", "line_number": 53, "usage_type": "attribute"}, {"api_name": "nvidia.dali.types", "line_number": 53, "usage_type": "name"}, {"api_name": 
"nvidia.dali.fn.random.coin_flip", "line_number": 54, "usage_type": "call"}, {"api_name": "nvidia.dali.fn.random", "line_number": 54, "usage_type": "attribute"}, {"api_name": "nvidia.dali.fn", "line_number": 54, "usage_type": "name"}, {"api_name": "nvidia.dali.fn.decoders.image", "line_number": 56, "usage_type": "call"}, {"api_name": "nvidia.dali.fn.decoders", "line_number": 56, "usage_type": "attribute"}, {"api_name": "nvidia.dali.fn", "line_number": 56, "usage_type": "name"}, {"api_name": "nvidia.dali.types.RGB", "line_number": 56, "usage_type": "attribute"}, {"api_name": "nvidia.dali.types", "line_number": 56, "usage_type": "name"}, {"api_name": "nvidia.dali.fn.resize", "line_number": 58, "usage_type": "call"}, {"api_name": "nvidia.dali.fn", "line_number": 58, "usage_type": "name"}, {"api_name": "nvidia.dali.types.INTERP_TRIANGULAR", "line_number": 58, "usage_type": "attribute"}, {"api_name": "nvidia.dali.types", "line_number": 58, "usage_type": "name"}, {"api_name": "nvidia.dali.fn.crop_mirror_normalize", "line_number": 59, "usage_type": "call"}, {"api_name": "nvidia.dali.fn", "line_number": 59, "usage_type": "name"}, {"api_name": "nvidia.dali.types.FLOAT", "line_number": 60, "usage_type": "attribute"}, {"api_name": "nvidia.dali.types", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "258332722", "text": "import cv2\nimport torch\nfrom torchvision.transforms import transforms\nfrom codes_recognizer.network_model import Net\n\n\ndef recognizer(path):\n codes = []\n code = []\n\n transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,)),\n ])\n\n # load nn model\n model = Net()\n model.load_state_dict(torch.load('codes_recognizer/model.pt'))\n model.eval()\n img = cv2.imread(path)\n\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img_gray = cv2.GaussianBlur(img_gray, (5, 5), 0)\n\n ret, im_th = cv2.threshold(img_gray, 90, 255, cv2.THRESH_BINARY_INV)\n\n ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n rects = [cv2.boundingRect(ctr) for ctr in ctrs]\n\n for rect in rects:\n # Crop image\n crop_img = img[rect[1]:rect[1] + rect[3] + 10, rect[0]:rect[0] + rect[2] + 10, 0]\n # Resize the image\n roi = cv2.resize(crop_img, (28, 28), interpolation=cv2.INTER_CUBIC)\n # roi = cv2.dilate(roi, (3, 3))\n # plt.imshow(roi)\n # plt.show()\n im = transform(roi)\n im = im.view(1, 1, 28, 28)\n with torch.no_grad():\n logps = model(im)\n ps = torch.exp(logps)\n probab = list(ps.numpy()[0])\n code.append(probab.index(max(probab)))\n\n\n # cv2.imshow(\"Code\", img)\n # cv2.waitKey()\n\n return code\n\n", "sub_path": "codes_recognizer/rocognizer.py", "file_name": "rocognizer.py", "file_ext": "py", "file_size_in_byte": 1452, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "torchvision.transforms.transforms.Compose", "line_number": 11, "usage_type": "call"}, {"api_name": "torchvision.transforms.transforms", "line_number": 11, "usage_type": "name"}, {"api_name": "torchvision.transforms.transforms.ToTensor", "line_number": 11, "usage_type": "call"}, {"api_name": "torchvision.transforms.transforms.Normalize", "line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.transforms.transforms", "line_number": 12, "usage_type": "name"}, {"api_name": "codes_recognizer.network_model.Net", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 19, 
"usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "521171707", "text": "# 自动使用cookie登陆的流程\n# 打开登陆页面后自动通过用户密码登陆\n# 自动提取反馈回来的cookie\n# 利用提取的cookie登陆隐私页面\n\nfrom urllib import request, parse\nfrom http import cookiejar\n\n# 创建cookiejar的实例\ncookie = cookiejar.CookieJar()\n# 生成cookie的管理器\ncookie_handler = request.HTTPCookieProcessor(cookie)\n# 创建http请求管理器\nhttp_handler = request.HTTPHandler()\n# 生成https管理器\nhttps_handler = request.HTTPSHandler()\n# 创建请求管理器\nopener = request.build_opener(http_handler, https_handler, cookie_handler)\n\n# 初次登录,验证后给我们cookie\ndef login():\n '''\n 负责初次登录\n 需输入用户名和密码,用来获取cookie凭证\n '''\n # 登录用户地址,进入人人网登录首页,查看网页源码\n # 网页源码中打开查找,查找“下次自动登录”\n # 然后向上找form,里面就有提交表单的地址格式,login-form\n url = 'http://www.renren.com/PLogin.do'\n\n # 此键值需要从登录form的对应两个input中提取name属性\n data = {'email': '908851835@qq.com', 'password': 'zfb123456zfb'}\n\n # 把数据进行编码\n data = parse.urlencode(data)\n\n # 创建一个请求对象\n req = request.Request(url, data=data.encode())\n\n # 使用opener发起请求,会自动提取我的cookie\n rsp = opener.open(req)\n\ndef getHomePage():\n url = 'http://www.renren.com/574862780'\n\n # 如果已经执行了login,则opener则自动已经包含了相应的cookie值\n rsp = opener.open(url)\n # 读取网页的内容并进行解码\n html = rsp.read().decode()\n # 将打开的网页保存为html文件,然后浏览器打开\n with open('43_13_rsp.html', 'w') as f:\n f.write(html)\n\nif __name__ == '__main__':\n # 初次使用用户名密码登陆后提取得到cookie\n login()\n # 使用获取的额cookie登陆个人主页\n getHomePage()\n\n\n", "sub_path": "004_Selenium使用_Cookies-Session验证_JWT验证_人人网_豆瓣/000_43_13_request.urlopen_cookiejar自动提取使用本地保存的cookie_模拟人人网个人主页登陆.py", "file_name": "000_43_13_request.urlopen_cookiejar自动提取使用本地保存的cookie_模拟人人网个人主页登陆.py", "file_ext": "py", "file_size_in_byte": 1945, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "http.cookiejar.CookieJar", "line_number": 10, "usage_type": "call"}, {"api_name": "http.cookiejar", "line_number": 10, "usage_type": "name"}, {"api_name": "urllib.request.HTTPCookieProcessor", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 12, "usage_type": "name"}, {"api_name": "urllib.request.HTTPHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 14, "usage_type": "name"}, {"api_name": "urllib.request.HTTPSHandler", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 16, "usage_type": "name"}, {"api_name": "urllib.request.build_opener", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 18, "usage_type": "name"}, 
{"api_name": "urllib.parse.urlencode", "line_number": 35, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 35, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 38, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "147330951", "text": "import timeywimey\nfrom datetime import datetime, timedelta\nfrom models import Event\n\nclass CalendarMonth:\n monthName = None\n nextMonthName = None\n prevMonthName = None\n nextYear = None\n prevYear = None\n nextMonth = None\n prevMonth = None\n events = None\n\ndef getEventsForMonthAndYear(month, year, user):\n thisMonthStartDate = datetime(year, month, 1)\n\n # adding 31 days will always get a date in the next month\n nextMonthStartDate = (thisMonthStartDate + timedelta(days = 31)) \n\n # subtracting 1 day will always get a date in the previous month\n prevMonthStartDate = (thisMonthStartDate - timedelta(days = 1))\n\n calmonth = CalendarMonth()\n calmonth.monthName = timeywimey.getmonthname(month)\n calmonth.nextYear = nextMonthStartDate.year\n calmonth.prevYear = prevMonthStartDate.year\n calmonth.nextMonth = nextMonthStartDate.month\n calmonth.prevMonth = prevMonthStartDate.month\n calmonth.nextMonthName = timeywimey.getmonthname(nextMonthStartDate.month)\n calmonth.prevMonthName = timeywimey.getmonthname(prevMonthStartDate.month) \n\n events = Event.objects.filter(\n start_time__gte = thisMonthStartDate\n ).filter(\n start_time__lt = nextMonthStartDate\n ).order_by(\n 'start_time'\n )\n\n editEvents = Event.objects.filter(\n start_time__gte = thisMonthStartDate\n ).filter(\n start_time__lt = nextMonthStartDate\n ).filter(\n acluserevent__user__id = user.id\n )\n\n editableEvents = filter(lambda x : x.id in (e.id for e in editEvents), events)\n \n for event in editableEvents:\n event.isEditable = True\n\n calmonth.events = events \n return calmonth\n", "sub_path": "GameCalendar/gcalendar.py", "file_name": "gcalendar.py", "file_ext": "py", "file_size_in_byte": 1730, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "datetime.datetime", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 22, "usage_type": "call"}, {"api_name": "timeywimey.getmonthname", "line_number": 25, "usage_type": "call"}, {"api_name": "timeywimey.getmonthname", "line_number": 30, "usage_type": "call"}, {"api_name": "timeywimey.getmonthname", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Event.objects.filter", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Event.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.Event", "line_number": 33, "usage_type": "name"}, {"api_name": "models.Event.objects.filter", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Event.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.Event", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "210448425", "text": "import requests\n\n\n# API call\ndef api_response_data(url):\n payload={}\n headers = {}\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n return response.json()\n\n\n\ndef api_data_parser(data):\n #Parsing the flag url\n country_info = data.get(\"countryInfo\", \"Some error\")\n flag_url = country_info.get(\"flag\", \"Some error in getting flag url\")\n\n #Parsing the data 
from api response data\n country_name = data.get(\"country\", \"Country name is not available\")\n todayCases = data.get(\"todayCases\", \"todayCases data is not available\")\n todayDeaths = data.get(\"todayDeaths\", \"todayDeaths data is not available\")\n todayRecovered = data.get(\"todayRecovered\", \"todayRecovered data is not available\")\n active = data.get(\"active\", \"active cases data is not available\")\n critical = data.get(\"critical\", \"critical cases data is not available\")\n\n stats = [todayCases, todayDeaths, todayRecovered, active, critical]\n return stats, country_name, flag_url\n\n\n\n", "sub_path": "custom_func.py", "file_name": "custom_func.py", "file_ext": "py", "file_size_in_byte": 1003, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "requests.request", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "595683484", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"CherryPy Status Transaction Metadata object class.\"\"\"\nfrom cherrypy import tools, request\nfrom pacifica.metadata.rest.transaction_queries.query_base import QueryBase\nfrom pacifica.metadata.orm import TransactionRelease, DOITransaction, CitationTransaction\nfrom pacifica.metadata.rest.user_queries.user_lookup import UserLookup\nfrom pacifica.metadata.orm.base import db_connection_decorator\n\n\nclass TransactionReleaseState(QueryBase):\n \"\"\"Retrieves release state for an individual transaction (GET) or set of transactions (POST).\"\"\"\n\n exposed = True\n\n @staticmethod\n def _get_release_state(transaction_list):\n releases = TransactionReleaseState._get_release_info(transaction_list)\n\n output_results = {}\n user_lookup_cache = {}\n found_transactions = []\n\n transactions = QueryBase._get_transaction_sizes(transaction_list)\n\n for release in releases:\n found_transactions.append(release['transaction'])\n if release['authorized_person'] not in user_lookup_cache:\n user_lookup_cache[release['authorized_person']] = UserLookup.get_user_info_block(\n release['authorized_person'], 'simple')\n release.update({\n 'authorized_person': user_lookup_cache[release['authorized_person']],\n 'release_state': 'released', 'display_state': 'Released',\n 'release_date': release['release_date'].isoformat(),\n 'total_size_bytes': transactions[release['transaction']]['total_file_size_bytes'],\n 'total_file_count': transactions[release['transaction']]['total_file_count'],\n 'release_doi_entries': TransactionReleaseState._get_doi_release(release['transaction']),\n 'release_citations': TransactionReleaseState._get_citation_release(release['transaction'])\n })\n output_results[release['transaction']] = release\n\n missing_transactions = TransactionReleaseState._generate_missing_transactions(\n transaction_list, found_transactions\n )\n output_results.update(missing_transactions)\n\n return output_results\n\n @staticmethod\n def _generate_missing_transactions(transaction_list, found_transactions):\n output_results = {}\n missing_transactions = list(\n set(transaction_list) - set(found_transactions))\n for txn in missing_transactions:\n output_results[txn] = {\n 'authorized_person': None, 'release_state': 'not_released',\n 'display_state': 'Not Released', 'transaction': txn\n }\n return output_results\n\n @staticmethod\n def _get_release_info(transaction_list):\n # pylint: disable=no-member\n releases = (TransactionRelease\n .select(TransactionRelease.transaction,\n TransactionRelease.authorized_person,\n 
TransactionRelease.updated.alias('release_date'))\n .where(TransactionRelease.transaction << transaction_list).dicts())\n # pylint: enable=no-member\n return releases\n\n @staticmethod\n def _get_doi_release(transaction_id):\n output_results = None\n # pylint: disable=no-member\n doi_releases = (DOITransaction\n .select()\n .where(DOITransaction.transaction_id == transaction_id))\n # pylint: enable=no-member\n if doi_releases.exists():\n output_results = []\n for release in doi_releases:\n output_results.append({\n 'doi_status': release.doi.status,\n 'doi_reference': release.doi.doi\n })\n return output_results\n\n @staticmethod\n def _get_citation_release(transaction_id):\n output_results = None\n # pylint: disable=no-member\n citation_releases = (CitationTransaction\n .select()\n .where(CitationTransaction.transaction_id == transaction_id))\n # pylint: enable=no-member\n if citation_releases.exists():\n output_results = []\n for citation_entry in citation_releases:\n output_results.append(\n {\n 'citation_id': citation_entry.citation.id,\n 'title': citation_entry.citation.article_title,\n 'doi_reference': citation_entry.citation.doi_reference\n }\n )\n return output_results\n\n # Cherrypy requires these named methods.\n # pylint: disable=invalid-name\n @staticmethod\n @tools.json_out()\n @db_connection_decorator\n def GET(trans_id=None):\n \"\"\"Return release details about the specified transaction entity.\"\"\"\n return TransactionReleaseState._get_release_state((int(trans_id),))\n\n # pylint: disable=duplicate-code\n @staticmethod\n @tools.json_out()\n @tools.json_in()\n @db_connection_decorator\n # pylint: enable=duplicate-code\n def POST():\n \"\"\"Return transaction release state details for the list of transaction_id's.\"\"\"\n transaction_list = [int(trans_id) for trans_id in request.json]\n return TransactionReleaseState._get_release_state(transaction_list)\n", "sub_path": "pacifica/metadata/rest/transaction_queries/transaction_release_state.py", "file_name": "transaction_release_state.py", "file_ext": "py", "file_size_in_byte": 5368, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pacifica.metadata.rest.transaction_queries.query_base.QueryBase", "line_number": 11, "usage_type": "name"}, {"api_name": "pacifica.metadata.rest.transaction_queries.query_base.QueryBase._get_transaction_sizes", "line_number": 24, "usage_type": "call"}, {"api_name": "pacifica.metadata.rest.transaction_queries.query_base.QueryBase", "line_number": 24, "usage_type": "name"}, {"api_name": "pacifica.metadata.rest.user_queries.user_lookup.UserLookup.get_user_info_block", "line_number": 29, "usage_type": "call"}, {"api_name": "pacifica.metadata.rest.user_queries.user_lookup.UserLookup", "line_number": 29, "usage_type": "name"}, {"api_name": "pacifica.metadata.orm.TransactionRelease.select", "line_number": 64, "usage_type": "call"}, {"api_name": "pacifica.metadata.orm.TransactionRelease", "line_number": 64, "usage_type": "name"}, {"api_name": "pacifica.metadata.orm.TransactionRelease.transaction", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pacifica.metadata.orm.TransactionRelease", "line_number": 65, "usage_type": "name"}, {"api_name": "pacifica.metadata.orm.TransactionRelease.authorized_person", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pacifica.metadata.orm.TransactionRelease", "line_number": 66, "usage_type": "name"}, {"api_name": "pacifica.metadata.orm.TransactionRelease.updated.alias", 
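Two peewee idioms carry the release query above: the `<<` operator compiles to SQL IN, and .dicts() makes the select yield plain dictionaries instead of model instances. A generic sketch with a made-up model (not Pacifica's schema):

import peewee as pw

db = pw.SqliteDatabase(':memory:')

class Release(pw.Model):
    transaction = pw.IntegerField()
    authorized_person = pw.IntegerField()

    class Meta:
        database = db

db.create_tables([Release])

wanted = [1, 2, 3]
rows = (Release
        .select(Release.transaction, Release.authorized_person)
        .where(Release.transaction << wanted)  # WHERE transaction IN (1, 2, 3)
        .dicts())
for row in rows:  # each row is a plain dict, e.g. {'transaction': 1, 'authorized_person': 7}
    print(row)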
"line_number": 67, "usage_type": "call"}, {"api_name": "pacifica.metadata.orm.TransactionRelease.updated", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pacifica.metadata.orm.TransactionRelease", "line_number": 67, "usage_type": "name"}, {"api_name": "pacifica.metadata.orm.TransactionRelease.transaction", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pacifica.metadata.orm.TransactionRelease", "line_number": 68, "usage_type": "name"}, {"api_name": "pacifica.metadata.orm.DOITransaction.select", "line_number": 76, "usage_type": "call"}, {"api_name": "pacifica.metadata.orm.DOITransaction", "line_number": 76, "usage_type": "name"}, {"api_name": "pacifica.metadata.orm.DOITransaction.transaction_id", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pacifica.metadata.orm.DOITransaction", "line_number": 78, "usage_type": "name"}, {"api_name": "pacifica.metadata.orm.CitationTransaction.select", "line_number": 93, "usage_type": "call"}, {"api_name": "pacifica.metadata.orm.CitationTransaction", "line_number": 93, "usage_type": "name"}, {"api_name": "pacifica.metadata.orm.CitationTransaction.transaction_id", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pacifica.metadata.orm.CitationTransaction", "line_number": 95, "usage_type": "name"}, {"api_name": "cherrypy.tools.json_out", "line_number": 112, "usage_type": "call"}, {"api_name": "cherrypy.tools", "line_number": 112, "usage_type": "name"}, {"api_name": "pacifica.metadata.orm.base.db_connection_decorator", "line_number": 113, "usage_type": "name"}, {"api_name": "cherrypy.request.json", "line_number": 126, "usage_type": "attribute"}, {"api_name": "cherrypy.request", "line_number": 126, "usage_type": "name"}, {"api_name": "cherrypy.tools.json_out", "line_number": 120, "usage_type": "call"}, {"api_name": "cherrypy.tools", "line_number": 120, "usage_type": "name"}, {"api_name": "cherrypy.tools.json_in", "line_number": 121, "usage_type": "call"}, {"api_name": "cherrypy.tools", "line_number": 121, "usage_type": "name"}, {"api_name": "pacifica.metadata.orm.base.db_connection_decorator", "line_number": 122, "usage_type": "name"}]} +{"seq_id": "279245665", "text": "from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .models import *\nfrom .forms import StockCreateForm, StockSearchForm, StockUpdateForm, IssueForm, ReceiveForm, ReorderLevelForm, \\\n StockHistorySearchForm, CategoryCreateForm\nimport csv\n\n\n# Create your views here.\ndef home(request):\n title = 'Welcome : This is your home page'\n test = 'Hey Neha!'\n context = {\n 'title': title,\n 'test': test,\n }\n return redirect('/list_items/')\n\n\n@login_required\ndef list_items(request):\n header = 'List of listed items'\n form = StockSearchForm(request.POST or None)\n queryset = Stock.objects.all()\n context = {\n 'header': header,\n 'queryset': queryset,\n 'form': form,\n }\n if request.method == \"POST\":\n queryset = Stock.objects.filter(category__name__icontains=form['category'].value(),\n item_name__icontains=form['item_name'].value())\n if form['export_to_CSV'].value() == True:\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment;filename=\"List of stock.csv\"'\n writer = csv.writer(response)\n writer.writerow(['CATEGORY', 'ITEM_NAME', 'QUANTITY'])\n instance = queryset\n for stock in instance:\n writer.writerow([stock.category, stock.item_name, 
stock.quantity])\n return response\n context = {\n 'header': header,\n 'queryset': queryset,\n 'form': form,\n }\n return render(request, 'list_items.html', context)\n\n\n@login_required\ndef add_category(request):\n form = CategoryCreateForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully Created')\n return redirect('/list_items/')\n context = {\n \"form\": form,\n \"title\": \"Add Category\",\n }\n return render(request, \"add_items.html\", context)\n\n\n@login_required\ndef add_items(request):\n form = StockCreateForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully Saved')\n return redirect('/list_items/')\n context = {\n \"form\": form,\n \"title\": \"Add Items\",\n }\n return render(request, \"add_items.html\", context)\n\n\ndef update_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n form = StockUpdateForm(request.POST or None)\n if request.method == \"POST\":\n form = StockUpdateForm(request.POST, instance=queryset)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully Saved')\n return redirect('/list_items/')\n context = {\n 'form': form,\n }\n return render(request, 'add_items.html', context)\n\n\ndef delete_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n if request.method == \"POST\":\n queryset.delete()\n messages.success(request, 'Deleted Successfully')\n return redirect('/list_items')\n return render(request, 'delete_items.html')\n\n\ndef stock_details(request, pk):\n queryset = Stock.objects.get(id=pk)\n context = {\n \"title\": queryset.item_name,\n \"queryset\": queryset,\n }\n return render(request, \"stock_details.html\", context)\n\n\ndef issue_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n form = IssueForm(request.POST or None, instance=queryset)\n if form.is_valid():\n instance = form.save(commit=False)\n # instance.receive_quantity = 0\n instance.quantity = instance.quantity - instance.issue_quantity\n instance.issue_by = str(request.user)\n messages.success(request, \"Issued Successfully\" + str(instance.quantity) + \" \" + str(\n instance.item_name) + \"s now left in store\")\n instance.save()\n issue_history = StockHistory(\n id=instance.id,\n last_updated=instance.last_updated,\n category_id=instance.category_id,\n item_name=instance.item_name,\n quantity=instance.quantity,\n issue_to=instance.issue_to,\n issue_by=instance.issue_by,\n issue_quantity=instance.issue_quantity,\n )\n issue_history.save()\n return redirect('/stock_details/' + str(instance.id))\n context = {\n \"title\": \"Issue \" + str(queryset.item_name),\n \"queryset\": queryset,\n \"form\": form,\n \"username\": \"Issue By: \" + str(request.user),\n }\n return render(request, \"add_items.html\", context)\n\n\ndef receive_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n form = ReceiveForm(request.POST or None, instance=queryset)\n if form.is_valid():\n instance = form.save(commit=False)\n # instance.issue_quantity = 0\n instance.quantity = instance.quantity + instance.receive_quantity\n instance.receive_by = str(request.user)\n messages.success(request, \"Received Successfully\" + str(instance.quantity) + \" \" + str(\n instance.item_name) + \"s now left in store\")\n instance.save()\n receive_history = StockHistory(\n id=instance.id,\n last_updated=instance.last_updated,\n category_id=instance.category_id,\n item_name=instance.item_name,\n quantity=instance.quantity,\n receive_quantity=instance.receive_quantity,\n 
receive_by=instance.receive_by\n )\n receive_history.save()\n return redirect('/stock_details/' + str(instance.id))\n context = {\n \"title\": \"Receive \" + str(queryset.item_name),\n \"instance\": queryset,\n \"form\": form,\n \"username\": \"Received By: \" + str(request.user),\n }\n return render(request, \"add_items.html\", context)\n\n\ndef reorder_level(request, pk):\n queryset = Stock.objects.get(id=pk)\n form = ReorderLevelForm(request.POST or None, instance=queryset)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"Reorder Level for \" + str(instance.item_name) + \" is updated to \" + str(\n instance.reorder_level))\n return redirect(\"/list_items/\")\n context = {\n \"instance\": queryset,\n \"form\": form,\n }\n return render(request, \"add_items.html\", context)\n\n\ndef list_history(request):\n header = \"History Data\"\n queryset = StockHistory.objects.all()\n form = StockHistorySearchForm(request.POST or None)\n context = {\n \"header\": header,\n \"queryset\": queryset,\n \"form\": form,\n }\n if request.method == \"POST\":\n category = form['category'].value()\n queryset = StockHistory.objects.filter(item_name__icontains=form['item_name'].value(),\n last_updated__range=[\n form['start_date'].value(),\n form['end_date'].value()\n ]\n )\n if category != \"\":\n queryset = queryset.filter(category_id=category)\n if form['export_to_CSV'].value() == True:\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"Stock History.csv\"'\n writer = csv.writer(response)\n writer.writerow(\n ['CATEGORY',\n 'ITEM NAME',\n 'QUANTITY',\n 'ISSUE QUANTITY',\n 'RECEIVE QUANTITY',\n 'RECEIVE BY',\n 'ISSUE BY',\n 'LAST UPDATED'])\n instance = queryset\n for stock in instance:\n writer.writerow(\n [stock.category,\n stock.item_name,\n stock.quantity,\n stock.issue_quantity,\n stock.receive_quantity,\n stock.receive_by,\n stock.issue_by,\n stock.last_updated])\n return response\n context = {\n \"header\": header,\n \"queryset\": queryset,\n \"form\": form,\n }\n return render(request, \"list_history.html\", context)\n\n\n", "sub_path": "stock/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8535, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 19, "usage_type": "call"}, {"api_name": "forms.StockSearchForm", "line_number": 25, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 36, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 22, "usage_type": "name"}, {"api_name": "forms.CategoryCreateForm", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 57, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 57, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 63, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 52, "usage_type": "name"}, {"api_name": "forms.StockCreateForm", "line_number": 68, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 71, 
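The list views above build their CSV exports in memory with HttpResponse plus csv.writer, which is fine at this scale. For very large histories, Django's documented streaming pattern avoids holding the whole file in memory; a sketch, where the Echo helper and function name are illustrative rather than part of the app:

import csv
from django.http import StreamingHttpResponse

class Echo:
    """Pseudo-buffer: csv.writer calls write(); we just hand each row back."""
    def write(self, value):
        return value

def export_history_csv(rows):
    writer = csv.writer(Echo())
    response = StreamingHttpResponse(
        (writer.writerow(row) for row in rows),  # one CSV line at a time
        content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="Stock History.csv"'
    return response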
"usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 71, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 77, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 66, "usage_type": "name"}, {"api_name": "forms.StockUpdateForm", "line_number": 82, "usage_type": "call"}, {"api_name": "forms.StockUpdateForm", "line_number": 84, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 87, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 87, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 88, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 92, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 99, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 99, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 100, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 101, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 110, "usage_type": "call"}, {"api_name": "forms.IssueForm", "line_number": 115, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 121, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 121, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 135, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 142, "usage_type": "call"}, {"api_name": "forms.ReceiveForm", "line_number": 147, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 153, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 153, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 166, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 173, "usage_type": "call"}, {"api_name": "forms.ReorderLevelForm", "line_number": 178, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 182, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 182, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 184, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 189, "usage_type": "call"}, {"api_name": "forms.StockHistorySearchForm", "line_number": 195, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 212, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 214, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 241, "usage_type": "call"}]} +{"seq_id": "358523931", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 25 17:57:21 2020\n\n@author: clarence\n\"\"\"\n#This is a test commit\n\n## nhvnhgnbv n,b,nbnb\nfrom tkinter import *\nimport random\nimport time\nimport os \nimport string\nfrom pygame import mixer # Load the popular external library\nfrom gtts import gTTS\nfrom nltk import tokenize\nimport speech_recognition as sr\n#from rasa.nlu.model import Interpreter\n\nimport os\nimport subprocess\n\nfrom subprocess import PIPE, run\nimport signal\n#from playsound import playsound\n\n\n#----------------OPEN NGROK-------------------------\np = 
subprocess.Popen(['gnome-terminal', '--disable-factory', '-e', 'bash -c \\\"./ngrok http 5130; sleep 1000000\\\" '], preexec_fn=os.setpgrp)\n\ntime.sleep(7)\n\n#-----------------------GET NGROK URL----------------\n\ncommand = [\"curl\",\"—silent\",\"—show-error\",\"http://127.0.0.1:4040/api/tunnels\",\"|\",\"sed\",\"-nE\",\"\"\"'s/.*public_url\":\"https:..([^\"]*).*/\\1/p'\"\"\"]\nresult = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)\nstr1=result.stdout\n\nimport json\ndiu=json.loads(str1)\n\nngrok_url=diu['tunnels'][0]['public_url']\n\n#-----------------------CALL BOT---------------------------------------------------------\n\nactual_path=os.path.abspath(os.getcwd())\nos.chdir(actual_path+'/Lab1/sample_bot')\np2 = subprocess.Popen(['gnome-terminal', '--disable-factory', '-e', 'bash -c \\\"bash condactivateAlana.sh; sleep 1000000\\\" '], preexec_fn=os.setpgrp)\n\ntime.sleep(10)\nos.chdir(actual_path)\n\n#---------------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom PIL import Image, ImageTk\nWIDTH = 500\nHEIGHT = 300\nsize=300\n#----------------------------------------------------------------------\ndef resizefile(file,size):\n image = Image.open(file)\n image = image.resize((size,size))\n photo = ImageTk.PhotoImage(image)\n return photo\n#-----------------------------------------------------------------------\n \nimport requests\nanswer =\"\"\n\nNew_bots = {\"EcoBot_Fact\":ngrok_url}\nprint(New_bots)\nbot_list_= [New_bots]\nprio_bot_list_=list(bot_list_)\n#bot_list_=[\"clarification_bot\",\"ontology_bot\",\"aiml_bot\",\"coherence_bot\",\"evi\",\"weather_bot\",\"fact_bot\",\"news_bot_v2\",\"wiki_bot_mongo\",\"profanity_bot\",\"reddit_bot\"]\n#prio_bot_list_= list(bot_list_).remove('coherence_bot')\n\n\n\n#-----------------------------------------------------------------------\n\nclass MainWindow():\n\n #----------------\n\n def __init__(self, main):\n\n # canvas for image\n self.canvas = Canvas(main, width=WIDTH, height=HEIGHT)\n self.canvas.grid(row=0, column=0, columnspan = 2,sticky = W+E+N+S )\n\n # images\n self.my_images = []\n self.my_images.append(resizefile(\"./test.png\",size))\n self.my_images.append(resizefile(\"./test1.png\",size))\n self.my_images.append(resizefile(\"./test2.png\",size))\n self.my_images.append(resizefile(\"./test3.png\",size))\n self.my_images.append(resizefile(\"./test4.png\",size))\n self.my_images.append(resizefile(\"./test5.png\",size))\n self.my_image_number = 0\n \n #mood\n self.happy = self.my_images.copy()[0:3]\n #print(len(self.happy))\n self.sad = [self.my_images.copy()[i] for i in [0,3]]\n #print(len(self.sad))\n self.angry = self.my_images.copy()[4:6]\n #print(len(self.angry))\n \n # set first image on canvas\n self.image_on_canvas = self.canvas.create_image(WIDTH/2, HEIGHT/2, image = self.my_images[self.my_image_number])\n\n #text input\n self.variable1=StringVar() # Value saved here\n self.e1 = Entry(main, textvariable = self.variable1)\n self.e1.grid(row=1,column=0, columnspan = 2,sticky = W+E+N+S )\n\n #text output\n self.t1 = Text(main,width=40, height=10)\n self.t1.grid(row=3,column=0, columnspan = 2,sticky = W+E+N+S )\n \n # button to change image\n self.button = Button(main, text=\"Write\", command=self.getText)\n self.button.grid(row=2, column=0, sticky = W+E+N+S )\n \n \n # button to Speak\n self.button = Button(main, text=\"Speak\", command=self.getSpeech)\n self.button.grid(row=2, column=1, sticky = W+E+N+S )\n\n #----------------\n def getText(self):\n 
text=self.variable1.get()\n self.askAlana(text)\n \n def getSpeech(self):\n text = self.SpeechtoText()\n #time.sleep(1)\n self.askAlana(text)\n \n \n def askAlana(self,text):\n\n #text=self.variable1.get()\n data = {'user_id': 'test-user', 'question': text, 'session_id': 'someonearoundthecornerSSSS', 'projectId': 'CA2020', 'overrides': {'BOT_LIST': bot_list_ , 'PRIORITY_BOTS': [prio_bot_list_]}}\n #data = {'user_id': 'test-user', 'question': text, 'session_id': 'someonearoundthecorner', 'projectId': 'CA2020', 'overrides': {'BOT_LIST': bot_list_ , 'PRIORITY_BOTS': [prio_bot_list_, 'coherence_bot']}}\n \n r= requests.post(url='http://52.56.181.83:5000', json=data)\n #r= requests.post(url='http://52.23.135.246:5000', json=data)\n answer=r.json()['result']\n\n self.t1.configure(state='normal')\n self.t1.delete('1.0', END)\n self.t1.insert(END,answer)\n self.t1.configure(state='disabled')\n \n sentence_list=tokenize.sent_tokenize(answer)\n \n for i in range (0,len(sentence_list)): \n\n \n wordlist=[word.strip(string.punctuation) for word in sentence_list[i].split()]\n numerofword=len(wordlist)\n #print(numerofword)\n \n try:\n output_speech=gTTS(text = sentence_list[i], lang=\"en\", slow = False)\n output_speech.save(\"speech.mp3\")\n \n mixer.init()\n mixer.music.load(\"speech.mp3\")\n mixer.music.play()\n \n \n mood_ = \"happy\"\n for j in range (0,round(numerofword*2.2)):\n root.update()\n self.onButton(mood_)\n time.sleep(0.2)\n \n except AssertionError:\n pass\n \n self.my_image_number = -1\n self.onButton(mood_)\n \n time.sleep(0.5)\n\n \n def onButton(self,mood = None):\n\n if mood is None:\n mood=\"happy\"\n # next image\n \n if mood == \"happy\":\n self.moodlist = self.happy\n if mood == \"sad\":\n self.moodlist = self.sad\n if mood == \"angry\":\n self.moodlist = self.angry\n \n self.my_image_number += 1\n \n # return to first image\n if self.my_image_number == len(self.moodlist) or self.my_image_number > len(self.moodlist) :\n self.my_image_number = 0\n\n # change image\n self.canvas.itemconfig(self.image_on_canvas, image = self.moodlist[self.my_image_number])\n\n def SpeechtoText(self):\n\n r3 = sr.Recognizer()\n \n with sr.Microphone(device_index=0) as source:\n #with sr.Microphone(device_index=0) as source:\n #print('[search edureka : search youtube]')\n print('Want to speak to Alana?')\n try:\n \n r3.adjust_for_ambient_noise(source,duration = 1)\n #r3.energy_threshold = 50\n #r3.dynamic_energy_threshold = False\n print(\"Speak\")\n audio = r3.listen(source, timeout= 5)\n \n except sr.UnknownValueError:\n print('error')\n except sr.RequestError as e:\n print('failed'.format(e))\n \n #print(r3.recognize_google(audio))\n output3=r3.recognize_google(audio, language = 'en-GB', show_all = True)\n output4 = output3['alternative'][0]['transcript']\n \n #print(output3)\n print(output4)\n \n return output4\n \n\n#----------------------------------------------------------------------\n\n\n\nroot = Tk()\n\n\nroot.title(\"Welcome to Alana app\")\n\nMainWindow(root)\nroot.mainloop()\n\n#-----------------KILL TERMINAL----------------------\nos.killpg(p.pid, signal.SIGINT)\nos.killpg(p2.pid, signal.SIGINT)\n", "sub_path": "SmartHome_Cacty/CACTY.py", "file_name": "CACTY.py", "file_ext": "py", "file_size_in_byte": 8255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "subprocess.Popen", "line_number": 31, "usage_type": "call"}, {"api_name": "os.setpgrp", "line_number": 31, "usage_type": "attribute"}, {"api_name": 
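The ngrok lookup in CACTY.py shells out to curl and then parses the tunnel JSON with json.loads. Since the script already imports requests and json, the same query can be done in-process. A minimal sketch, assuming ngrok's local inspection API is listening on its default port 4040 as in the code above (error handling omitted):

import requests

def get_ngrok_url(api_url="http://127.0.0.1:4040/api/tunnels"):
    # Ask the local ngrok inspection API for the open tunnels and
    # return the public URL of the first one.
    tunnels = requests.get(api_url).json()["tunnels"]
    return tunnels[0]["public_url"]

This mirrors diu['tunnels'][0]['public_url'] in the script and removes the dependency on an external curl binary.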
"time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 38, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 38, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 48, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 49, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 50, "usage_type": "call"}, {"api_name": "os.setpgrp", "line_number": 50, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 53, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 65, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 67, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 67, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 151, "usage_type": "call"}, {"api_name": "nltk.tokenize.sent_tokenize", "line_number": 160, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 160, "usage_type": "name"}, {"api_name": "string.punctuation", "line_number": 165, "usage_type": "attribute"}, {"api_name": "gtts.gTTS", "line_number": 170, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 173, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 173, "usage_type": "name"}, {"api_name": "pygame.mixer.music.load", "line_number": 174, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 174, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 175, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 175, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 182, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 190, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 217, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 219, "usage_type": "call"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 231, "usage_type": "attribute"}, {"api_name": "speech_recognition.RequestError", "line_number": 233, "usage_type": "attribute"}, {"api_name": "os.killpg", "line_number": 259, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 259, "usage_type": "attribute"}, {"api_name": "os.killpg", "line_number": 260, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 260, "usage_type": "attribute"}]} +{"seq_id": "120316438", "text": "# -*- coding: utf-8 -*-\n\nfrom utils.singleton import Singleton\n\n\nclass Factory(object):\n __metaclass__ = Singleton\n # conn_pool = ConnectionPool()\n # packer = DataPacker(1, 2)\n # router = Router()\n\n def __init__(self):\n self.conn_pool = None\n self.packer = None\n self.router = None\n\n\nfactory = Factory()", "sub_path": "factory.py", "file_name": "factory.py", "file_ext": "py", "file_size_in_byte": 339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "utils.singleton.Singleton", 
"line_number": 7, "usage_type": "name"}]} +{"seq_id": "421728372", "text": "#!/usr/bin/env python3\n#\n# Copyright (c) 2018 Glimp IP Ltd\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport os\nimport argparse\nimport textwrap\nimport json\nimport glob\nimport fnmatch\nimport ntpath\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-a\", \"--add\", action='append', help=\"Add BVH file, or merge another index - implicitly selected for applying any edit options given too\")\nparser.add_argument(\"-r\", \"--remove\", action='append', help=\"Remove BVH file\")\n\n# Select entries to view/edit (overridden when adding new entries)\nparser.add_argument(\"-s\", \"--start\", type=int, default=0, help=\"Start range (negative values are relative to end of index)\")\nparser.add_argument(\"-e\", \"--end\", type=int, default=0, help=\"End range (zero means to go to the end of the index, negative values are relative to end of index)\")\nparser.add_argument(\"-n\", \"--name-match\", action='append', help=\"Only look at entries whose name matches this wildcard pattern\")\nparser.add_argument(\"--file-match\", action='append', help=\"Only look at entries whose relative filename matches this wildcard pattern\")\nparser.add_argument(\"--blacklisted\", action='store_true', help=\"Only look at blacklisted entries\")\nparser.add_argument(\"--non-blacklisted\", action='store_true', help=\"Only look at non-blacklisted entries\")\nparser.add_argument(\"--with-tag\", action='append', help=\"Only look at entries with this tag\")\nparser.add_argument(\"--without-tag\", action='append', help=\"Only look at entries without this tag\")\n\n\n# Edit commands\nparser.add_argument(\"--clear-tags\", action='store_true', help=\"Clear all tags (done before adding any new tags\")\nparser.add_argument(\"-t\", \"--tag\", action='append', help=\"Add tag\")\nparser.add_argument(\"-u\", \"--untag\", action='append', help=\"Remove tag\")\nparser.add_argument(\"--blacklist\", action='store_true', help=\"Mark entries as blacklisted (will add a 'blacklist' tag too)\")\nparser.add_argument(\"--unblacklist\", action='store_true', help=\"Clear blacklist status of entries (will remove any 'blacklist' tag too)\")\nparser.add_argument(\"--fps\", type=int, default=0, help=\"Define what the capture frame rate was (negative means to unset any definition, zero means leave untouched)\")\nparser.add_argument(\"--note\", help=\"Append a descriptive comment\")\n\nparser.add_argument(\"--list\", action=\"store_true\", 
help=\"List the names of matched entries\")\nparser.add_argument(\"--dry-run\", action=\"store_true\", help=\"Dry run\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Display verbose debug information\")\n\nparser.add_argument(\"index_filename\", help=\"Filename of index.json to parse / edit\")\n\nargs = parser.parse_args()\n\nprint_matched=False\nprint_entries=False\nprint_changes=False\n\nif args.verbose:\n print_matched=True\n print_changes=True\n print_entries=True\n\nif args.dry_run:\n print_matched=True\n print_changes=True\n\nif args.list:\n print_matched=True\n\nfilename_map = {}\nname_map = {}\n\n\ndef process_entry(entry, i):\n changes = []\n\n if 'name' not in entry:\n new_name = ntpath.basename(entry['file'])[:-4]\n\n if new_name in name_map:\n for n in range(1, 1000):\n unique_name = '%s-%05d' % (new_name, n)\n if unique_name not in name_map:\n new_name = unique_name\n break\n if new_name in name_map:\n sys.exit(\"ERROR: Failed to determine unique name for %s\" % entry['file'])\n\n entry['name'] = new_name\n name_map[new_name] = entry\n changes += [ \"Set name to '%s', based on filename\" % new_name]\n\n if args.clear_tags:\n if 'tags' in entry:\n del entry['tags']\n changes += [ \"Clear tags\" ]\n\n if 'camera' in entry:\n del entry['camera']\n changes += [ \"Delete legacy camera data\" ]\n\n if args.blacklist:\n if 'blacklist' not in entry or not entry['blacklist']:\n entry['blacklist']=True\n if 'tags' not in entry:\n entry['tags']={}\n entry['tags']['blacklist']=True\n changes += [ \"Blacklisted\" ]\n\n if args.unblacklist:\n if 'blacklist' in entry:\n del entry['blacklist']\n changes += [ \"Un-blacklisted\" ]\n if 'tags' in entry and 'blacklist' in entry['tags']:\n del entry['tags']['blacklist']\n\n if 'blacklist' in entry:\n if entry['blacklist'] == True:\n if 'tags' not in entry:\n entry['tags']={}\n entry['tags']['blacklist']=True\n else:\n del entry['blacklist']\n changes += [ \"Remove redundant blacklist=false\" ]\n\n if args.fps > 0:\n if 'fps' not in entry or entry['fps'] != args.fps:\n entry['fps'] = args.fps\n changes += [ \"Set fps\" ]\n elif args.fps < 0:\n if 'fps' in entry:\n del entry['fps']\n changes += [ \"Unset fps\" ]\n \n if args.note:\n if 'notes' not in entry:\n entry['notes'] = []\n entry['notes'] = [ args.note ]\n changes += [ \"Add note\" ]\n\n if 'notes' in entry and len(entry['notes']) == 0:\n del entry['notes']\n changes += [ \"Remove empty notes array\" ]\n\n if args.tag:\n for tag in args.tag:\n tag = tag.lower()\n if 'tags' not in entry:\n entry['tags'] = {}\n if tag not in entry['tags']:\n entry['tags'][tag] = True\n changes += [ \"Add tag %s\" % tag ]\n\n if 'tags' in entry and args.untag:\n for tag in args.untag:\n tag = tag.lower()\n if tag in entry['tags']:\n del entry['tags'][tag]\n changes += [ \"Remove tag %s\" % tag ]\n\n if 'tags' in entry and len(entry['tags']) == 0:\n del entry['tags']\n changes += [ \"Remove empty tags\" ]\n\n if print_matched:\n if len(changes):\n print(\"%d) %s - CHANGED\" % (i, entry['name']))\n if print_changes:\n for c in changes:\n print(\"> %s\" % c)\n else:\n print(\"%d) %s - unchanged\" % (i, entry['name']))\n if print_entries:\n print(\" > filename: %s\" % entry['file'])\n if 'blacklist' in entry and entry['blacklist']:\n print(\" > black-listed: true\")\n if 'fps' in entry:\n print(\" > fps: %d\" % entry['fps'])\n if 'notes' in entry and len(entry['notes']):\n print(\" > notes:\")\n for note in entry['notes']:\n print(\" > | %s\" % note)\n if 'tags' in entry and 
len(entry['tags']):\n print(\" > tags: %s\" % ','.join(entry['tags']))\n\n\ndef normalize_path(bvh_path):\n index_dir = os.path.dirname(args.index_filename)\n abs_bvh_path = os.path.abspath(bvh_path)\n abs_index_dir = os.path.abspath(index_dir)\n # no matter what OS we're using we want consistent filename\n # indexing conventions...\n rel_path = os.path.relpath(abs_bvh_path, abs_index_dir)\n rel_path = ntpath.normpath(rel_path)\n rel_path = ntpath.normcase(rel_path)\n return rel_path\n\n\ndef append_index_entries(entries, full_index):\n # Add all filenames and names to dictionaries so we can ensure we don't\n # index any duplicates...\n for entry in entries:\n if 'file' in entry:\n if entry['file'] in filename_map:\n sys.exit(\"ERROR: %s has duplicate entries for %s\" % (args.index_filename, entry['file']))\n filename_map[entry['file']] = entry\n if 'name' in entry:\n if entry['name'] in name_map:\n sys.exit(\"ERROR: %s has duplicate entries for name: '%s'\" % (args.index_filename, entry['name']))\n name_map[entry['name']] = entry\n\n # Normalize how we blacklist entries:\n blacklisted=False\n if 'blacklist' in entry:\n blacklisted = entry['blacklist']\n del entry['blacklist']\n if 'tags' in entry and 'blacklist' in entry['tags']:\n blacklisted = True\n\n if blacklisted:\n if 'tags' not in entry:\n entry['tags'] = {}\n entry['tags']['blacklist'] = True\n \n full_index.append(entry)\n\n\nindex = []\nif os.path.exists(args.index_filename):\n with open(args.index_filename, 'r') as fp:\n entries = json.load(fp)\n\n print(\"Opened %s with %d entries\" % (args.index_filename, len(entries)))\n\n if args.remove:\n for bvh_path in args.remove:\n rel_path = normalize_path(bvh_path)\n before_len = len(entries)\n entries = [ entry for entry in entries if entry['file'] != rel_path ]\n if len(entries) < before_len:\n if print_changes:\n print(\"Remove %s from index\" % bvh_path)\n else:\n print(\"WARNING: no entry for %s found for removal\" % bvh_path)\n\n append_index_entries(entries, index)\n\n# All filtering options (--start, --end, --name-match, --with[out]-tag etc)\n# are ignored when adding new entries and instead it's as if all the new\n# entries were selected for any edit operations...\nif args.add:\n i = len(index)\n for path in args.add:\n if path.endswith(\".json\"):\n with open(path, 'r') as fp:\n entries = json.load(fp)\n if print_changes:\n print(\"Merge %d entries from %s\" % (len(entries), path))\n append_index_entries(entries, index)\n for entry in entries:\n process_entry(entry, i)\n i+=1\n else:\n rel_path = normalize_path(path)\n\n if rel_path in filename_map:\n print('WARNING: Not re-adding %s to index' % rel_path)\n continue\n\n new_entry = { 'file': rel_path }\n filename_map[rel_path] = new_entry\n\n index.append(new_entry)\n if print_changes:\n print(\"Add %s to index\" % rel_path)\n process_entry(new_entry, i)\n i+=1\nelse:\n end = args.end\n if end == 0:\n end = len(index)\n\n for i in range(args.start, end):\n entry = index[i]\n\n blacklisted=False\n if 'tags' in entry and 'blacklist' in entry['tags']:\n blacklisted = True\n\n if args.blacklisted and not blacklisted:\n continue\n\n if args.non_blacklisted and blacklisted:\n continue\n\n tags_whitelist = args.with_tag\n if tags_whitelist:\n matched_whitelist=False\n if 'tags' in entry:\n for tag in tags_whitelist:\n if tag in entry['tags']:\n matched_whitelist=True\n break\n if not matched_whitelist:\n continue\n\n tags_blacklist = args.without_tag\n if tags_blacklist:\n matched_blacklist=False\n if 'tags' in entry:\n for tag 
in tags_blacklist:\n if tag in entry['tags']:\n matched_blacklist=True\n break\n if matched_blacklist:\n continue\n\n if args.name_match:\n if 'name' not in entry:\n continue\n matched_name=False\n for match in args.name_match:\n if fnmatch.fnmatch(entry['name'], match):\n matched_name = True\n break\n if not matched_name:\n continue\n\n if args.file_match:\n matched_filename=False\n for match in args.file_match:\n norm_match = normalize_path(match)\n if fnmatch.fnmatch(entry['file'], norm_match):\n matched_filename = True\n break\n if not matched_filename:\n continue\n\n process_entry(entry, args.start + i)\n i+=1\n\nif not args.dry_run:\n with open(args.index_filename, 'w') as fp:\n json.dump(index, fp, indent=4, sort_keys=True)\n\n\nhbars = [u\"\\u0020\", u\"\\u258f\", u\"\\u258e\", u\"\\u258d\", u\"\\u258b\", u\"\\u258a\", u\"\\u2589\"]\nmax_bar_width = 10\n\n\n# outputs the percentage bar (made from hbars) calculated from provided values\ndef get_percentage_bar(value, max_entries):\n bar_len = int(max_bar_width * 6 * value / max_entries)\n bar_output = \"\"\n for i in range(0, max_bar_width):\n if bar_len > 6:\n bar_output += hbars[6]\n bar_len -= 6\n else:\n bar_output += hbars[bar_len]\n bar_len = 0\n return bar_output\n\n\nprint(\"\")\nprint(\"Summary of index contents:\")\n\nwith open(args.index_filename, 'r+') as fp:\n index = json.load(fp)\n\n print(\"\")\n full_len = len(index)\n n_blacklisted = len([x for x in index if 'tags' in x and 'blacklist' in x['tags']])\n print(\"%d non-blacklisted entries\" % (full_len - n_blacklisted))\n print(\"%d blacklisted entries\" % n_blacklisted)\n\n tag_count = {}\n for e in index:\n if 'tags' in e:\n if 'blacklist' in e['tags']:\n continue\n for tag in e['tags']:\n tag_count[tag] = tag_count.get(tag, 0) + 1\n\n print(\"\")\n print(\"Index tags (ignoring blacklisted entries):\")\n print(\"\")\n print(' {:<15s}{:<10s}{:<8s}|{:<10s}|'.format(\"TAG NAME\", \"COUNT\", \"PERCENT\", \" \"))\n print('-' * 80)\n for (key, val) in sorted(tag_count.items(),\n key=lambda kv: (-kv[1], kv[0])):\n count = tag_count[key]\n percentage = count / full_len * 100\n bar = get_percentage_bar(count, full_len)\n print(' {:<15s}{:<10d}{:<8.2f}|{:<10s}|'.format(key, count, percentage, bar))\n\n\n\n\n\n\n", "sub_path": "glimpse-mocap-indexer.py", "file_name": "glimpse-mocap-indexer.py", "file_ext": "py", "file_size_in_byte": 14717, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 32, "usage_type": "call"}, {"api_name": "ntpath.basename", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path", "line_number": 199, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path", "line_number": 200, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path", "line_number": 201, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path", "line_number": 204, "usage_type": "attribute"}, {"api_name": "ntpath.normpath", "line_number": 205, "usage_type": "call"}, {"api_name": "ntpath.normcase", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, 
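The summary table at the end of the indexer gets sub-character resolution from the hbars glyphs: each of the ten bar cells encodes up to six eighth-block steps, so a full-width bar distinguishes 60 levels. A condensed, self-contained restatement of that logic with made-up counts:

hbars = ["\u0020", "\u258f", "\u258e", "\u258d", "\u258b", "\u258a", "\u2589"]
max_bar_width = 10

def get_percentage_bar(value, max_entries):
    # Total bar length in eighth-block units, then one glyph per cell.
    bar_len = int(max_bar_width * 6 * value / max_entries)
    cells = []
    for _ in range(max_bar_width):
        cells.append(hbars[min(bar_len, 6)])
        bar_len = max(bar_len - 6, 0)
    return "".join(cells)

for count in (222, 111, 7):
    print("{:5d} |{}|".format(count, get_percentage_bar(count, 222)))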
{"api_name": "json.load", "line_number": 242, "usage_type": "call"}, {"api_name": "json.load", "line_number": 267, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 334, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 344, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 355, "usage_type": "call"}, {"api_name": "json.load", "line_number": 380, "usage_type": "call"}]} +{"seq_id": "388908170", "text": "from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nsentiment_analyzer = SentimentIntensityAnalyzer()\n\ndef classify_sentiment(str):\n sentiment_values = sentiment_analyzer.polarity_scores(str)\n sentiment = \"\"\n for k, v in sentiment_values.items():\n if k == 'compound' and v > 0.5:\n continue\n elif v > 0.5:\n sentiment = k\n return sentiment", "sub_path": "sentiment_classifier.py", "file_name": "sentiment_classifier.py", "file_ext": "py", "file_size_in_byte": 402, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer", "line_number": 2, "usage_type": "call"}]} +{"seq_id": "38412855", "text": "# -*- coding: utf-8 -*-\n#\n# ramstk.dao.programdb.RAMSTKOpLoad.py is part of The RAMSTK Project\n#\n# All rights reserved.\n# Copyright 2007 - 2017 Doyle Rowland doyle.rowland reliaqual com\n\"\"\"RAMSTKOpLoad Table Module.\"\"\"\n\nfrom sqlalchemy import Column, ForeignKey, Integer, String\nfrom sqlalchemy.orm import relationship\n\n# Import other RAMSTK modules.\nfrom ramstk.Utilities import none_to_default\nfrom ramstk.dao.RAMSTKCommonDB import RAMSTK_BASE\n\n\nclass RAMSTKOpLoad(RAMSTK_BASE):\n \"\"\"\n Class to represent the table ramstk_op_load in the RAMSTK Program database.\n\n This table shares a Many-to-One relationship with ramstk_mechanism.\n This table shares a One-to-Many relationship with ramstk_op_stress.\n This table shares a One-to-Many relationship with ramstk_test_method.\n \"\"\"\n\n __tablename__ = 'ramstk_op_load'\n __table_args__ = {'extend_existing': True}\n\n mechanism_id = Column(\n 'fld_mechanism_id',\n Integer,\n ForeignKey('ramstk_mechanism.fld_mechanism_id'),\n nullable=False)\n load_id = Column(\n 'fld_load_id',\n Integer,\n primary_key=True,\n autoincrement=True,\n nullable=False)\n\n description = Column('fld_description', String(512), default='')\n damage_model = Column('fld_damage_model', String(512), default='')\n priority_id = Column('fld_priority_id', Integer, default=0)\n\n # Define the relationships to other tables in the RAMSTK Program database.\n mechanism = relationship('RAMSTKMechanism', back_populates='op_load')\n op_stress = relationship(\n 'RAMSTKOpStress', back_populates='op_load', cascade='all,delete')\n test_method = relationship(\n 'RAMSTKTestMethod', back_populates='op_load', cascade='all,delete')\n\n is_mode = False\n is_mechanism = False\n is_opload = True\n is_opstress = False\n is_testmethod = False\n\n def get_attributes(self):\n \"\"\"\n Retrieve the current values of the RAMSTKOpLoad data model attributes.\n\n :return: {mechanism_id, load_id, description, damage_model,\n priority_id} pairs\n :rtype: dict\n \"\"\"\n _attributes = {\n 'mechanism_id': self.mechanism_id,\n 'load_id': self.load_id,\n 'description': self.description,\n 'damage_model': self.damage_model,\n 'priority_id': self.priority_id\n }\n\n return _attributes\n\n def set_attributes(self, attributes):\n \"\"\"\n Set the RAMSTKOpLoad data model attributes.\n\n :param dict 
attributes: values to assign to instance attributes.\n :return: (_code, _msg); the error code and error message.\n :rtype: tuple\n \"\"\"\n _error_code = 0\n _msg = \"RAMSTK SUCCESS: Updating RAMSTKOpLoad {0:d} attributes.\". \\\n format(self.load_id)\n\n try:\n self.description = str(\n none_to_default(attributes['description'], ''))\n self.damage_model = str(\n none_to_default(attributes['damage_model'], ''))\n self.priority_id = int(\n none_to_default(attributes['priority_id'], 0))\n except KeyError as _err:\n _error_code = 40\n _msg = \"RAMSTK ERROR: Missing attribute {0:s} in attribute \" \\\n \"dictionary passed to \" \\\n \"RAMSTKOpLoad.set_attributes().\".format(_err)\n\n return _error_code, _msg\n", "sub_path": "src/ramstk/dao/programdb/RAMSTKOpLoad.py", "file_name": "RAMSTKOpLoad.py", "file_ext": "py", "file_size_in_byte": 3436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "ramstk.dao.RAMSTKCommonDB.RAMSTK_BASE", "line_number": 17, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 31, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 36, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 43, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 49, "usage_type": "call"}, {"api_name": "ramstk.Utilities.none_to_default", "line_number": 90, "usage_type": "call"}, {"api_name": "ramstk.Utilities.none_to_default", "line_number": 92, "usage_type": "call"}, {"api_name": "ramstk.Utilities.none_to_default", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "422426753", "text": "from pathlib import Path\nimport argparse\nimport os\n\nimport numpy as np\nimport tifffile as tiff\nfrom tqdm import tqdm\n# from PIL import Image, ImageSequence\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Splits a tiff stack into separate tiff files')\n\n parser.add_argument('--tiffstack', type=str,\n help='Full file path of tiff stack input')\n parser.add_argument('--outpath', type=str,\n help='Full path for output files')\n parser.add_argument('--datatype', type=str,\n help='Cast images as a particular dtype (uint8/uint16/uint64)')\n # parser.add_argument('--scale_to_datatype', action='store_true',\n # help='flag to scale the input datatype to the datatype specified')\n\n args = parser.parse_args()\n\n # validate args\n if args.tiffstack is None:\n raise ValueError('No input file')\n\n return args\n\n\ndef expand_stack(args):\n stackfile = Path(args.tiffstack)\n\n if args.outpath is None:\n outname = stackfile.parent / stackfile.stem\n else:\n outname = Path(args.outpath)\n if not outname.exists():\n outname.mkdir()\n\n stack 
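The get_attributes/set_attributes pair on RAMSTKOpLoad above round-trips the mapped columns through a plain dict: set_attributes coerces None values to defaults via none_to_default and answers with an (error code, message) tuple, reporting code 40 when a key is missing instead of raising. A usage sketch of that contract (assumes the ramstk package context above; no database session is needed just to exercise the attribute methods):

op_load = RAMSTKOpLoad()
op_load.load_id = 1

# All three keys present: damage_model falls back to '' via none_to_default.
code, msg = op_load.set_attributes({
    "description": "Cyclic bending load",
    "damage_model": None,
    "priority_id": 2,
})
print(code)  # 0 on success

# A missing key is reported as an error code instead of a raised KeyError.
code, msg = op_load.set_attributes({"description": "incomplete"})
print(code)  # 40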
= tiff.imread(str(stackfile))\n\n num_slices = len(stack)\n digits = len(str(abs(num_slices)))\n outfname = '{0:0{1}d}'\n\n for idx, page in tqdm(enumerate(stack), total=num_slices):\n numpypage = np.array(page, dtype=args.datatype)\n tiff.imsave(\n str(outname / (outfname.format(idx, digits) + '.tif')),\n data=numpypage)\n\n\ndef main():\n args = parse_args()\n\n expand_stack(args)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "scripts/expand_tiff_stacks.py", "file_name": "expand_tiff_stacks.py", "file_ext": "py", "file_size_in_byte": 1667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 34, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 39, "usage_type": "call"}, {"api_name": "tifffile.imread", "line_number": 43, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "tifffile.imsave", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "271776005", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 16 09:28:55 2018\nGaussian Processes\n\n@see: http://katbailey.github.io/post/gaussian-processes-for-dummies/\n@author: Daniel Wehner\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as pl\n\n# -----------------------------------------------------------------------------\n# Test data\n# -----------------------------------------------------------------------------\n\n# test data\nn = 50\nX_test = np.linspace(-5, 5, n).reshape(-1, 1)\n\n\ndef kernel(a, b, param):\n \"\"\"\n Gaussian RBF kernel.\n \n :param a:\n :param b:\n :param param:\n \"\"\"\n sq_dist = np.sum(a**2, 1).reshape(-1, 1) + np.sum(b**2, 1) - 2 * np.dot(a, b.T)\n return np.exp(-.5 * (1/param) * sq_dist)\n\n\nparam = 0.1\nK_ss = kernel(X_test, X_test, param)\n\n# get cholesky decomposition (square root) of the covariance matrix\nL = np.linalg.cholesky(K_ss + 1e-15 * np.eye(n))\n\n# sample 3 sets of standard normals for our test points and\n# multiply them by the square root of the covariance matrix\nf_prior = np.dot(L, np.random.normal(size=(n, 3)))\n\n# Now let's plot the 3 sampled functions.\npl.plot(X_test, f_prior)\npl.axis([-5, 5, -3, 3])\npl.title(\"Three samples from the GP prior\")\npl.show()\n\n# -----------------------------------------------------------------------------\n# Real data\n# -----------------------------------------------------------------------------\n\n# noiseless training data\nX_train = np.array([-4, -3, -2, -1, 1]).reshape(5, 1)\ny_train = np.sin(X_train)\n\n# apply the kernel function to our training points\nK = kernel(X_train, X_train, param)\nL = np.linalg.cholesky(K + 0.00005 * np.eye(len(X_train)))\n\n# compute the mean at our test points\nK_s = kernel(X_train, X_test, param)\nLk = np.linalg.solve(L, K_s)\nmu = np.dot(Lk.T, np.linalg.solve(L, y_train)).reshape((n,))\n\n# compute the standard deviation so we can plot it\ns2 = np.diag(K_ss) - np.sum(Lk**2, axis=0)\nstdv = np.sqrt(s2)\n# draw samples from the posterior at our test points\nL = np.linalg.cholesky(K_ss + 1e-6 * np.eye(n) - np.dot(Lk.T, Lk))\nf_post = mu.reshape(-1, 1) + np.dot(L, np.random.normal(size=(n, 3)))\n\n# plot 5 data points\npl.plot(X_train, y_train, \"bs\", ms=8)\npl.plot(X_test, f_post)\n# two standard deviations\npl.gca().fill_between(X_test.flat, mu - 2 * stdv, mu + 2 * 
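The splitter above is essentially one tifffile.imread of the whole stack followed by one zero-padded save per page. A minimal round-trip sketch of that core on a synthetic stack (tifffile.imwrite is the current name for the older imsave used in the script):

import numpy as np
import tifffile as tiff

# Write a tiny 3-page stack, then split it page by page.
stack = np.random.randint(0, 255, size=(3, 8, 8), dtype=np.uint8)
tiff.imwrite("stack.tif", stack)

pages = tiff.imread("stack.tif")
digits = len(str(len(pages)))  # pad indices to a fixed width, as the script does
for idx, page in enumerate(pages):
    tiff.imwrite("{0:0{1}d}.tif".format(idx, digits), page)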
stdv, color=\"#dddddd\")\npl.plot(X_test, mu, \"r--\", lw=2)\npl.axis([-5, 5, -3, 3])\npl.title(\"Three samples from the GP posterior\")\npl.show()", "sub_path": "algorithms/gaussian_processes/gp.py", "file_name": "gp.py", "file_ext": "py", "file_size_in_byte": 2409, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.linspace", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.linalg.cholesky", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.linalg.cholesky", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.diag", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.linalg.cholesky", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 72, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 
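For reference, the posterior block in gp.py above is standard Gaussian-process conditioning computed through Cholesky solves rather than an explicit matrix inverse. Writing K = kernel(X_train, X_train), K_* = kernel(X_train, X_test) and K_{**} = kernel(X_test, X_test), the code computes

\mu_* = K_*^\top K^{-1} \mathbf{y}, \qquad \mathrm{diag}(\Sigma_*) = \mathrm{diag}(K_{**}) - \mathrm{diag}(K_*^\top K^{-1} K_*)

Since K = L L^\top, the line Lk = np.linalg.solve(L, K_s) yields L^{-1} K_*, so mu and s2 are exactly these two quantities, and f_post draws samples using the Cholesky factor of the posterior covariance.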
78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "91318744", "text": "#!/usr/bin/env python\nfrom sklearn.externals import joblib\nimport numpy as np\nimport pandas as pd\n\ndef get_sepsis_score(data, model):\n\n num_rows = len(data)\n M1 = joblib.load('model-saved.pkl')\n s_m = np.load('septic_mean.npy', allow_pickle=True)\n ns_m = np.load('Nonseptic_mean.npy', allow_pickle=True)\n All = np.vstack((s_m, ns_m))\n maenAll = np.mean(All, axis=0)\n\n # Pre processing for sLinear Interpolate\n for column in range(data.shape[1]):\n col = data[:, column]\n value = col[~np.isnan(col)]\n indexVal = np.argwhere(~np.isnan(col))\n indexNaN = np.argwhere(np.isnan(col))\n if ((len(value) == 1) & (col.shape[0] > 1)):\n col[np.int(indexNaN[0])] = data[np.int(indexVal[0]), column]\n data[:, column] = col\n\n df = pd.DataFrame.from_records(data)\n\n # sLinear Interpolate and linear approach\n df.interpolate(method='slinear', inplace=True)\n df.interpolate(method='linear', inplace=True)\n\n ## impute rest of NaN value with mean Value\n data = np.array(df)\n for column in range(data.shape[1]):\n col = data[:, column]\n value = col[np.isnan(col)]\n if len(value) > 0:\n col[np.isnan(col)] = maenAll[column]\n data[:, column] = col\n\n df = pd.DataFrame.from_records(data)\n\n predicted = M1.predict(data)\n\n score = np.random.rand(len(data), 1)\n for i in range(len(data)):\n if predicted[i]==0:\n score[i] = 0.4\n else:\n score[i] = 0.6\n\n label = np.copy(predicted)\n\n return score, label\n\ndef load_sepsis_model():\n\n return None\n", "sub_path": "get_sepsis_score.py", "file_name": "get_sepsis_score.py", "file_ext": "py", "file_size_in_byte": 1598, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sklearn.externals.joblib.load", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_records", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 32, 
"usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_records", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "43658320", "text": "#!/usr/bin/env python3\n# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.\n# This program or module is free software: you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version. It is provided for educational\n# purposes and is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n\nimport collections\nimport pickle\nimport socket\nimport struct\nimport sys\n\nimport Console\n\nVERSION = 1\n\n\nAddress = [\"localhost\", 9653]\nCarTuple = collections.namedtuple(\"CarTuple\", \"seats mileage owner\")\n\n\nclass SocketManager:\n\n def __init__(self, address):\n self.address = address\n\n\n def __enter__(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(self.address)\n return self.sock\n\n\n def __exit__(self, *ignore):\n self.sock.close()\n\n\n\ndef main():\n if len(sys.argv) > 1:\n Address[0] = sys.argv[1]\n call = dict(c=get_car_details, m=change_mileage, o=change_owner,\n n=new_registration, s=stop_server, q=quit)\n menu = (\"(C)ar Edit (M)ileage Edit (O)wner (N)ew car \"\n \"(S)top server (Q)uit\")\n valid = frozenset(\"cmonsq\")\n previous_license = None\n while True:\n action = Console.get_menu_choice(menu, valid, \"c\", True)\n previous_license = call[action](previous_license)\n\n\ndef retrieve_car_details(previous_license):\n license = Console.get_string(\"License\", \"license\",\n previous_license)\n if not license:\n return previous_license, None\n license = license.upper()\n ok, *data = handle_request(\"GET_CAR_DETAILS\", license)\n if ok:\n return license, CarTuple(*data)\n else:\n print(data[0])\n\n if data[0] == 'This license is not registered':\n return retrieve_car_details_prompt(previous_license)\n else:\n return previous_license, None\n\n\ndef retrieve_car_details_prompt(previous_license):\n while True:\n check = Console.get_menu_choice(\n \"Show similar (y/n)?\",\n \"yYnN\",\n force_lower=True,\n default=\"y\"\n )\n if check != 'y':\n break\n\n prefix = Console.get_string(\"Start of license\", \"prefix\")\n ok, *data = handle_request(\"GET_LICENSES\", prefix)\n\n if ok:\n licenses = data[0]\n\n if not licenses:\n print(\"No licenses starting with {}\".format(prefix))\n continue\n\n for index, license in enumerate(licenses, start=1):\n print(\"({}) {}\".format(index, license))\n\n choice = Console.get_integer(\n \"Enter choice (0 to cancel)\",\n minimum=0,\n maximum=len(licenses)\n )\n if choice == 0:\n break\n\n license = licenses[choice - 1].upper()\n\n ok, *data = handle_request(\"GET_CAR_DETAILS\", license)\n if ok:\n return license, CarTuple(*data)\n else:\n break\n\n return previous_license, None\n\n\ndef 
get_car_details(previous_license):\n license, car = retrieve_car_details(previous_license)\n if car is not None:\n print(\"License: {0}\\nSeats: {seats}\\nMileage: {mileage}\\n\"\n \"Owner: {owner}\".format(license, **car._asdict()))\n return license\n\n\ndef change_mileage(previous_license):\n license, car = retrieve_car_details(previous_license)\n if car is None:\n return previous_license\n mileage = Console.get_integer(\"Mileage\", \"mileage\",\n car.mileage, 0)\n if mileage == 0:\n return license\n ok, *data = handle_request(\"CHANGE_MILEAGE\", license, mileage)\n if not ok:\n print(data[0])\n else:\n print(\"Mileage successfully changed\")\n return license\n\n\ndef change_owner(previous_license):\n license, car = retrieve_car_details(previous_license)\n if car is None:\n return previous_license\n owner = Console.get_string(\"Owner\", \"owner\", car.owner)\n if not owner:\n return license\n ok, *data = handle_request(\"CHANGE_OWNER\", license, owner)\n if not ok:\n print(data[0])\n else:\n print(\"Owner successfully changed\")\n return license\n\n\ndef new_registration(previous_license):\n license = Console.get_string(\"License\", \"license\")\n if not license:\n return previous_license\n license = license.upper()\n seats = Console.get_integer(\"Seats\", \"seats\", 4, 0)\n if not (1 < seats < 10):\n return previous_license\n mileage = Console.get_integer(\"Mileage\", \"mileage\", 0, 0)\n owner = Console.get_string(\"Owner\", \"owner\")\n if not owner:\n return previous_license\n ok, *data = handle_request(\"NEW_REGISTRATION\", license, seats,\n mileage, owner)\n if not ok:\n print(data[0])\n else:\n print(\"Car {0} successfully registered\".format(license))\n return license\n\n\ndef quit(*ignore):\n sys.exit()\n\n\ndef stop_server(*ignore):\n ok, *data = handle_request(\"SHUTDOWN\")\n print(data[0])\n sys.exit()\n\n\ndef handle_request(*items, wait_for_reply=True):\n HeaderStruct = struct.Struct(\"!II\")\n data = pickle.dumps(items, 3)\n\n try:\n with SocketManager(tuple(Address)) as sock:\n sock.sendall(HeaderStruct.pack(len(data), VERSION))\n sock.sendall(data)\n if not wait_for_reply:\n return\n\n size_data = sock.recv(HeaderStruct.size)\n size, version = HeaderStruct.unpack(size_data)\n\n if version != VERSION:\n print(\"Unsupported protocol version %d\" % (version,))\n sys.exit(1)\n\n result = bytearray()\n while True:\n data = sock.recv(4000)\n if not data:\n break\n result.extend(data)\n if len(result) >= size:\n break\n return pickle.loads(result)\n except socket.error as err:\n print(\"{0}: is the server running?\".format(err))\n sys.exit(1)\n\n\nmain()\n", "sub_path": "pip3/chapter_11/car_registration.py", "file_name": "car_registration.py", "file_ext": "py", "file_size_in_byte": 6367, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "collections.namedtuple", "line_number": 24, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 34, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 34, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 45, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 46, "usage_type": "attribute"}, {"api_name": "Console.get_menu_choice", "line_number": 54, "usage_type": "call"}, {"api_name": "Console.get_string", "line_number": 59, "usage_type": "call"}, {"api_name": "Console.get_menu_choice", "line_number": 78, "usage_type": 
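The client above and its server share a small framing protocol: an 8-byte struct "!II" header in network byte order carrying the pickled payload's size and a protocol version, followed by the pickle itself. A self-contained sketch of packing and unpacking one request frame:

import pickle
import struct

HeaderStruct = struct.Struct("!II")  # (payload size, protocol version), big-endian
VERSION = 1

payload = pickle.dumps(("GET_CAR_DETAILS", "ABC123"), 3)
frame = HeaderStruct.pack(len(payload), VERSION) + payload

# Receiving side: read the fixed-size header first, then exactly `size` bytes.
size, version = HeaderStruct.unpack(frame[:HeaderStruct.size])
assert version == VERSION
items = pickle.loads(frame[HeaderStruct.size:HeaderStruct.size + size])
print(size, version, items)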
"call"}, {"api_name": "Console.get_string", "line_number": 87, "usage_type": "call"}, {"api_name": "Console.get_integer", "line_number": 100, "usage_type": "call"}, {"api_name": "Console.get_integer", "line_number": 131, "usage_type": "call"}, {"api_name": "Console.get_string", "line_number": 147, "usage_type": "call"}, {"api_name": "Console.get_string", "line_number": 159, "usage_type": "call"}, {"api_name": "Console.get_integer", "line_number": 163, "usage_type": "call"}, {"api_name": "Console.get_integer", "line_number": 166, "usage_type": "call"}, {"api_name": "Console.get_string", "line_number": 167, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 180, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 186, "usage_type": "call"}, {"api_name": "struct.Struct", "line_number": 190, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 191, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 205, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 215, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 216, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 218, "usage_type": "call"}]} +{"seq_id": "420534753", "text": "\"\"\"Script to compute list to perform inference on.\r\n\r\nThis script will output .txt file to the predefined output path (by default defined in\r\nsettings/scripts/compute_inference_list.yaml). The .txt file will hold a list of samples to perform\r\ninference on where each row holds one sample with the format of FileName_Idx. For a description of\r\nthe naming refer to scripts/save_labels_as_dataframe.py doc string.\r\n\r\n\"\"\"\r\nimport logging\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom lib.config.general_config import Config\r\nfrom lib.dataloader.constants.KITTI import TYPE, CLASS_LIST\r\nfrom lib.util.logging_util import configure_logging_verbosity\r\nfrom lib.util.argparse_util import default_config_parse\r\n\r\nLOGGER = logging.getLogger(__name__)\r\n\r\n\r\ndef compute_inference_list(label_path, output_path, seed=42, verbose=False):\r\n \"\"\"Compute inference list and save to disk.\r\n\r\n This method will save a .txt file with each column holding an unique identifier for a label.\r\n For each class n amount of samples are written to the file. n is equal to the minimum amount of\r\n samples for a class. For KITTI, Pedestrian_sitting is the class with the fewest occurrences\r\n (222), so for every class 222 samples would be chosen.\r\n\r\n Args:\r\n label_path (str or pathlib.Path): Path to labels as pickled pandas Data Frame file.\r\n output_path (str or pathlib.Path): Path to save the inference list to.\r\n seed (int): Random seed to enable reproducibility.\r\n verbose (True): Set verbosity.\r\n\r\n \"\"\"\r\n LOGGER.info(\"Compute inference list ... 
\")\r\n configure_logging_verbosity(verbose=verbose)\r\n random_state = np.random.RandomState(seed)\r\n labels = pd.read_pickle(str(label_path))\r\n n_samples_dict = dict()\r\n\r\n # Count samples per class\r\n for class_types in CLASS_LIST:\r\n n_samples_dict[class_types] = np.sum(labels[TYPE] == class_types)\r\n\r\n # From each class get the same amount of samples like the class with the fewest n of samples\r\n min_n = n_samples_dict[min(n_samples_dict, key=n_samples_dict.get)]\r\n inference_list = []\r\n for class_types in CLASS_LIST:\r\n labels_one_class = labels[labels[TYPE] == class_types]\r\n identifier = random_state.choice(labels_one_class.index.values, size=min_n, replace=False)\r\n inference_list.append(identifier)\r\n\r\n inference_list = [item for sublist in inference_list for item in sublist]\r\n np.savetxt(str(output_path), inference_list, fmt='%s')\r\n\r\n\r\ndef _main():\r\n \"\"\"Main script.\"\"\"\r\n args = default_config_parse(default_config_path='settings/scripts/compute_inference_list.yaml')\r\n configure_logging_verbosity(verbose=args.verbose)\r\n config = Config.build_from_yaml(args.config)\r\n compute_inference_list(**config.config,\r\n verbose=args.verbose)\r\n\r\n\r\nif __name__ == '__main__':\r\n _main()\r\n", "sub_path": "scripts/compute_inference_list.py", "file_name": "compute_inference_list.py", "file_ext": "py", "file_size_in_byte": 2868, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "lib.util.logging_util.configure_logging_verbosity", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 40, "usage_type": "call"}, {"api_name": "lib.dataloader.constants.KITTI.CLASS_LIST", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 45, "usage_type": "call"}, {"api_name": "lib.dataloader.constants.KITTI.TYPE", "line_number": 45, "usage_type": "name"}, {"api_name": "lib.dataloader.constants.KITTI.CLASS_LIST", "line_number": 50, "usage_type": "name"}, {"api_name": "lib.dataloader.constants.KITTI.TYPE", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.savetxt", "line_number": 56, "usage_type": "call"}, {"api_name": "lib.util.argparse_util.default_config_parse", "line_number": 61, "usage_type": "call"}, {"api_name": "lib.util.logging_util.configure_logging_verbosity", "line_number": 62, "usage_type": "call"}, {"api_name": "lib.config.general_config.Config.build_from_yaml", "line_number": 63, "usage_type": "call"}, {"api_name": "lib.config.general_config.Config", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "510961379", "text": "import numpy as np\nfrom PIL import Image\n\n\n# convertimos imagen a RGB para cambiar tonalidades rojas, azules y verdes.\ndef convertirImgMatrixRGB(img):\n return np.array(img.convert(\"RGB\"))\n\n# Primer paso para aplicar efecto infrarrojo a imagen es aplicar negativo\ndef convertirImgNegativo(img):\n arrImg = convertirImgMatrixRGB(img)\n for i in range(img.size[1]):\n for j in range(img.size[0]):\n arrImg[i][j] = 255-arrImg[i][j]\n imgNegativo = Image.fromarray(arrImg)\n return imgNegativo\n\ndef rgb(img):\n arrImg=convertirImgMatrixRGB(img)\n r=170\n g=0\n b=100\n for i in range(img.size[1]):\n for j in range(img.size[0]):\n arrImg[i][j][0] = 
(arrImg[i][j][0]+r)/2\n arrImg[i][j][1] = (arrImg[i][j][1]+g)/2\n arrImg[i][j][2] = (arrImg[i][j][2]+b)/2\n imgRGB=Image.fromarray(arrImg)\n return imgRGB\n\n# ... Aun no sabemos si nos sirve utilizar esta funcion. \ndef sumarImagenes(img1,img2):\n ALFA=0.5\n arrImg1=convertirImgMatrixRGB(img1)\n arrImg2=convertirImgMatrixRGB(img2)\n for i in range(img1.size[1]):\n for j in range(img1.size[0]):\n sumaPixel= (arrImg1[i][j]*(1-alfa))+(arrImg2[i][j]*(alfa))\n arrImg1[i][j]=sumaPixel\n imgSuma=Image.fromarray(arrImg1)\n return imgSuma\n\ndef main():\n img = Image.open('1.jpg')\n imgNegativo = convertirImgNegativo(img)\n imgfinal=rgb(imgNegativo)\n imgfinal.save(\"output.png\")\nmain()\n", "sub_path": "Produccion/ Felipe Alvarez y Juan Cortez/infrarrojo.py", "file_name": "infrarrojo.py", "file_ext": "py", "file_size_in_byte": 1449, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 15, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 15, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 28, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 40, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 40, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "624303484", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport abc\nimport inspect\nfrom copy import deepcopy\nimport numpy as np\nfrom astropy.table import Table\nfrom astropy import units as u\nfrom gammapy.modeling.models import Model\nfrom gammapy.maps import MapAxis\n\n__all__ = [\"Estimator\", \"FluxEstimate\"]\n\n\nDEFAULT_UNIT = {\n \"dnde\": u.Unit(\"cm-2 s-1 TeV-1\"),\n \"e2dnde\": u.Unit(\"erg cm-2 s-1\"),\n \"flux\": u.Unit(\"cm-2 s-1\"),\n \"eflux\": u.Unit(\"erg cm-2 s-1\"),\n}\n\nREQUIRED_MAPS = {\n \"dnde\": [\"dnde\"],\n \"e2dnde\": [\"e2dnde\"],\n \"flux\": [\"flux\"],\n \"eflux\": [\"eflux\"],\n \"likelihood\": [\"norm\"],\n}\n\nREQUIRED_COLUMNS = {\n \"dnde\": [\"e_ref\", \"dnde\"],\n \"e2dnde\": [\"e_ref\", \"e2dnde\"],\n \"flux\": [\"e_min\", \"e_max\", \"flux\"],\n \"eflux\": [\"e_min\", \"e_max\", \"eflux\"],\n # TODO: extend required columns\n \"likelihood\": [\"e_min\", \"e_max\", \"e_ref\", \"ref_dnde\", \"norm\"],\n}\n\nREQUIRED_QUANTITIES_SCAN = [\"norm_scan\", \"stat_scan\", \"stat\"]\n\nOPTIONAL_QUANTITIES = {\n \"dnde\": [\"dnde_err\", \"dnde_errp\", \"dnde_errn\", \"dnde_ul\"],\n \"e2dnde\": [\"e2dnde_err\", \"e2dnde_errp\", \"e2dnde_errn\", \"e2dnde_ul\"],\n \"flux\": [\"flux_err\", \"flux_errp\", \"flux_errn\", \"flux_ul\"],\n \"eflux\": [\"eflux_err\", \"eflux_errp\", \"eflux_errn\", \"eflux_ul\"],\n \"likelihood\": [\"norm_err\", \"norm_errn\", \"norm_errp\", \"norm_ul\"],\n}\n\nOPTIONAL_QUANTITIES_COMMON = [\n \"ts\",\n \"sqrt_ts\",\n \"npred\",\n \"npred_excess\",\n \"npred_null\",\n \"stat\",\n \"stat_null\",\n \"niter\",\n \"is_ul\"\n]\n\n\nclass Estimator(abc.ABC):\n \"\"\"Abstract estimator base class.\"\"\"\n\n _available_selection_optional = {}\n\n @property\n @abc.abstractmethod\n def tag(self):\n pass\n\n @abc.abstractmethod\n def run(self, datasets):\n pass\n\n @property\n def selection_optional(self):\n \"\"\"\"\"\"\n return 
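The per-pixel loops in infrarrojo.py above are pure-Python iterations over every pixel; with NumPy the negative and the channel blend can be applied to the whole array at once, and widening to a 16-bit dtype first avoids the uint8 wraparound that arrImg[i][j][0]+r risks. A vectorized sketch of the same effect (blend constants r, g, b as in the code above):

import numpy as np
from PIL import Image

img = np.array(Image.open("1.jpg").convert("RGB"), dtype=np.uint16)
negative = 255 - img                # the negative, all pixels at once
tint = np.array([170, 0, 100])      # r, g, b as in rgb() above
blended = (negative + tint) // 2    # average each channel with the tint
Image.fromarray(blended.astype(np.uint8)).save("output.png")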
self._selection_optional\n\n    @selection_optional.setter\n    def selection_optional(self, selection):\n        \"\"\"Set optional selection\"\"\"\n        available = self._available_selection_optional\n\n        if selection is None:\n            self._selection_optional = []\n        elif \"all\" in selection:\n            self._selection_optional = available\n        else:\n            if set(selection).issubset(set(available)):\n                self._selection_optional = selection\n            else:\n                difference = set(selection).difference(set(available))\n                raise ValueError(f\"{difference} is not a valid method.\")\n\n    @staticmethod\n    def get_sqrt_ts(ts, norm):\n        r\"\"\"Compute sqrt(TS) value.\n\n        Compute sqrt(TS) as defined by:\n\n        .. math::\n            \\sqrt{TS} = \\left \\{\n            \\begin{array}{ll}\n                -\\sqrt{TS} & : \\text{if} \\ norm < 0 \\\\\n                \\sqrt{TS} & : \\text{else}\n            \\end{array}\n            \\right.\n\n        Parameters\n        ----------\n        ts : `~numpy.ndarray`\n            TS value.\n        norm : `~numpy.ndarray`\n            norm value\n        Returns\n        -------\n        sqrt_ts : `~numpy.ndarray`\n            Sqrt(TS) value.\n        \"\"\"\n        with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n            return np.where(norm > 0, np.sqrt(ts), -np.sqrt(ts))\n\n    def _get_energy_axis(self, dataset):\n        \"\"\"Energy axis\"\"\"\n        if self.energy_edges is None:\n            energy_axis = dataset.counts.geom.axes[\"energy\"].squash()\n        else:\n            energy_axis = MapAxis.from_energy_edges(self.energy_edges)\n\n        return energy_axis\n\n    def copy(self):\n        \"\"\"Copy estimator\"\"\"\n        return deepcopy(self)\n\n    @property\n    def config_parameters(self):\n        \"\"\"Config parameters\"\"\"\n        pars = {}\n        names = self.__init__.__code__.co_varnames\n        for name in names:\n            if name == \"self\":\n                continue\n\n            pars[name] = getattr(self, name)\n        return pars\n\n    def __str__(self):\n        s = f\"{self.__class__.__name__}\\n\"\n        s += \"-\" * (len(s) - 1) + \"\\n\\n\"\n\n        pars = self.config_parameters\n        max_len = np.max([len(_) for _ in pars]) + 1\n\n        for name, value in sorted(pars.items()):\n            if isinstance(value, Model):\n                s += f\"\\t{name:{max_len}s}: {value.__class__.__name__}\\n\"\n            elif inspect.isclass(value):\n                s += f\"\\t{name:{max_len}s}: {value.__name__}\\n\"\n            elif isinstance(value, np.ndarray):\n                s += f\"\\t{name:{max_len}s}: {value}\\n\"\n            else:\n                s += f\"\\t{name:{max_len}s}: {value}\\n\"\n\n        return s.expandtabs(tabsize=2)\n\n\nclass FluxEstimate:\n    \"\"\"A flux estimate produced by an Estimator.\n\n    Follows the likelihood SED type description and allows norm values\n    to be converted to dnde, flux, eflux and e2dnde\n\n    The flux is converted according to the input spectral model. 
The latter must be the one used to\n obtain the 'norm' values of the input data.\n\n The energy axis is obtained from the input data:\n - directly from the energy `MapAxis` if the input data is a `dict` of `Map`\n - from the 'e_min' and 'e_max' columns if the input data is an `~astropy.table.Table`\n\n Parameters\n ----------\n data : dict of `Map` or `Table`\n Mappable containing the sed data with at least a 'norm' entry.\n If data is a Table, it should contain 'e_min' and 'e_max' columns.\n reference_spectral_model : `SpectralModel`\n Reference spectral model used to produce the input data.\n \"\"\"\n\n def __init__(self, data, reference_spectral_model):\n # TODO: Check data\n self._data = data\n\n if hasattr(self._data[\"norm\"], \"geom\"):\n self._energy_axis = self.data[\"norm\"].geom.axes[\"energy\"]\n self._expand_slice = (slice(None), np.newaxis, np.newaxis)\n else:\n # Here we assume there is only one row per energy\n self._energy_axis = MapAxis.from_table(table=data, format=\"gadf-sed\")\n self._expand_slice = slice(None)\n\n # Note that here we could use the specification from dnde_ref to build piecewise PL\n # But does it work beyond min and max centers?\n\n self._reference_spectral_model = reference_spectral_model\n\n @staticmethod\n def _validate_data(data, sed_type, check_scan=False):\n \"\"\"Check that the input is valid and corresponds to one of the SED types.\"\"\"\n try:\n keys = data.keys()\n required = set(REQUIRED_MAPS[sed_type])\n except AttributeError:\n keys = data.columns\n required = set(REQUIRED_COLUMNS[sed_type])\n except KeyError:\n raise ValueError(f\"Unknown SED type: '{sed_type}'\")\n\n if check_scan:\n required = required.union(REQUIRED_QUANTITIES_SCAN)\n\n if not required.issubset(keys):\n missing = required.difference(keys)\n raise ValueError(\n \"Missing data / column for sed type '{}':\" \" {}\".format(sed_type, missing)\n )\n\n @property\n def energy_axis(self):\n \"\"\"Energy axis (`MapAxis`)\"\"\"\n return self._energy_axis\n\n @property\n def reference_spectral_model(self):\n \"\"\"Reference spectral model (`SpectralModel`)\"\"\"\n return self._reference_spectral_model\n\n @property\n def data(self):\n return self._data\n\n @property\n def available_quantities(self):\n \"\"\"Available quantities\"\"\"\n try:\n keys = self.data.keys()\n except AttributeError:\n keys = self.data.columns\n\n available_quantities = []\n\n for quantity in OPTIONAL_QUANTITIES[\"likelihood\"] + OPTIONAL_QUANTITIES_COMMON:\n if quantity in keys:\n available_quantities.append(quantity)\n\n return available_quantities\n\n # TODO: add support for scan\n def _check_quantity(self, quantity):\n if quantity not in self.available_quantities:\n raise KeyError(\n f\"Cannot compute required flux quantity. 
{quantity} \"\n \"is not defined on current flux estimate.\"\n )\n\n @property\n def energy_ref(self):\n \"\"\"Reference energy\"\"\"\n return self.energy_axis.center\n\n @property\n def energy_min(self):\n \"\"\"Energy min\"\"\"\n return self.energy_axis.edges[:-1]\n\n @property\n def energy_max(self):\n \"\"\"Energy max\"\"\"\n return self.energy_axis.edges[1:]\n\n # TODO: keep or remove?\n @property\n def niter(self):\n \"\"\"Number of iterations of fit\"\"\"\n self._check_quantity(\"niter\")\n return self.data[\"niter\"]\n\n @property\n def is_ul(self):\n \"\"\"Number of iterations of fit\"\"\"\n self._check_quantity(\"is_ul\")\n return self.data[\"is_ul\"]\n\n @property\n def npred(self):\n \"\"\"Predicted counts\"\"\"\n self._check_quantity(\"npred\")\n return self.data[\"npred\"]\n\n @property\n def npred_null(self):\n \"\"\"Predicted counts null hypothesis\"\"\"\n self._check_quantity(\"npred_null\")\n return self.data[\"npred_null\"]\n\n @property\n def npred_excess(self):\n \"\"\"Predicted excess counts\"\"\"\n self._check_quantity(\"npred\")\n self._check_quantity(\"npred_null\")\n return self.data[\"npred\"] - self.data[\"npred_null\"]\n\n @property\n def stat(self):\n \"\"\"Fit statistic value\"\"\"\n self._check_quantity(\"stat\")\n return self.data[\"stat\"]\n\n @property\n def stat_null(self):\n \"\"\"Fit statistic value for thenull hypothesis\"\"\"\n self._check_quantity(\"stat_null\")\n return self.data[\"stat_null\"]\n\n @property\n def ts(self):\n \"\"\"ts map (`Map`)\"\"\"\n self._check_quantity(\"ts\")\n return self.data[\"ts\"]\n\n # TODO: just always derive from ts?\n @property\n def sqrt_ts(self):\n \"\"\"sqrt(TS) as defined by:\n\n .. math::\n\n \\sqrt{TS} = \\left \\{\n \\begin{array}{ll}\n -\\sqrt{TS} & : \\text{if} \\ norm < 0 \\\\\n \\sqrt{TS} & : \\text{else}\n \\end{array}\n \\right.\n\n \"\"\"\n return self.data[\"sqrt_ts\"]\n\n @property\n def norm(self):\n \"\"\"Norm values\"\"\"\n return self.data[\"norm\"]\n\n @property\n def norm_err(self):\n \"\"\"Norm error\"\"\"\n self._check_quantity(\"norm_err\")\n return self.data[\"norm_err\"]\n\n @property\n def norm_errn(self):\n \"\"\"Negative norm error\"\"\"\n self._check_quantity(\"norm_errn\")\n return self.data[\"norm_errn\"]\n\n @property\n def norm_errp(self):\n \"\"\"Positive norm error\"\"\"\n self._check_quantity(\"norm_errp\")\n return self.data[\"norm_errp\"]\n\n @property\n def norm_ul(self):\n \"\"\"Norm upper limit\"\"\"\n self._check_quantity(\"norm_ul\")\n return self.data[\"norm_ul\"]\n\n @property\n def dnde_ref(self):\n \"\"\"Reference differential flux\"\"\"\n result = self.reference_spectral_model(self.energy_axis.center)\n return result[self._expand_slice]\n\n @property\n def e2dnde_ref(self):\n \"\"\"Reference differential flux * energy ** 2\"\"\"\n energy = self.energy_axis.center\n result = (\n self.reference_spectral_model(energy) * energy ** 2\n )\n return result[self._expand_slice]\n\n @property\n def flux_ref(self):\n \"\"\"Reference integral flux\"\"\"\n energy_min = self.energy_axis.edges[:-1]\n energy_max = self.energy_axis.edges[1:]\n result = self.reference_spectral_model.integral(energy_min, energy_max)\n return result[self._expand_slice]\n\n @property\n def eflux_ref(self):\n \"\"\"Reference energy flux\"\"\"\n energy_min = self.energy_axis.edges[:-1]\n energy_max = self.energy_axis.edges[1:]\n result = self.reference_spectral_model.energy_flux(energy_min, energy_max)\n return result[self._expand_slice]\n\n @property\n def dnde(self):\n \"\"\"Return differential flux 
(dnde) SED values.\"\"\"\n return self.norm * self.dnde_ref\n\n @property\n def dnde_err(self):\n \"\"\"Return differential flux (dnde) SED errors.\"\"\"\n return self.norm_err * self.dnde_ref\n\n @property\n def dnde_errn(self):\n \"\"\"Return differential flux (dnde) SED negative errors.\"\"\"\n return self.norm_errn * self.dnde_ref\n\n @property\n def dnde_errp(self):\n \"\"\"Return differential flux (dnde) SED positive errors.\"\"\"\n return self.norm_errp * self.dnde_ref\n\n @property\n def dnde_ul(self):\n \"\"\"Return differential flux (dnde) SED upper limit.\"\"\"\n return self.norm_ul * self.dnde_ref\n\n @property\n def e2dnde(self):\n \"\"\"Return differential energy flux (e2dnde) SED values.\"\"\"\n return self.norm * self.e2dnde_ref\n\n @property\n def e2dnde_err(self):\n \"\"\"Return differential energy flux (e2dnde) SED errors.\"\"\"\n return self.norm_err * self.e2dnde_ref\n\n @property\n def e2dnde_errn(self):\n \"\"\"Return differential energy flux (e2dnde) SED negative errors.\"\"\"\n return self.norm_errn * self.e2dnde_ref\n\n @property\n def e2dnde_errp(self):\n \"\"\"Return differential energy flux (e2dnde) SED positive errors.\"\"\"\n return self.norm_errp * self.e2dnde_ref\n\n @property\n def e2dnde_ul(self):\n \"\"\"Return differential energy flux (e2dnde) SED upper limit.\"\"\"\n return self.norm_ul * self.e2dnde_ref\n\n @property\n def flux(self):\n \"\"\"Return integral flux (flux) SED values.\"\"\"\n return self.norm * self.flux_ref\n\n @property\n def flux_err(self):\n \"\"\"Return integral flux (flux) SED errors.\"\"\"\n return self.norm_err * self.flux_ref\n\n @property\n def flux_errn(self):\n \"\"\"Return integral flux (flux) SED negative errors.\"\"\"\n return self.norm_errn * self.flux_ref\n\n @property\n def flux_errp(self):\n \"\"\"Return integral flux (flux) SED positive errors.\"\"\"\n return self.norm_errp * self.flux_ref\n\n @property\n def flux_ul(self):\n \"\"\"Return integral flux (flux) SED upper limits.\"\"\"\n return self.norm_ul * self.flux_ref\n\n @property\n def eflux(self):\n \"\"\"Return energy flux (eflux) SED values.\"\"\"\n return self.norm * self.eflux_ref\n\n @property\n def eflux_err(self):\n \"\"\"Return energy flux (eflux) SED errors.\"\"\"\n return self.norm_err * self.eflux_ref\n\n @property\n def eflux_errn(self):\n \"\"\"Return energy flux (eflux) SED negative errors.\"\"\"\n return self.norm_errn * self.eflux_ref\n\n @property\n def eflux_errp(self):\n \"\"\"Return energy flux (eflux) SED positive errors.\"\"\"\n return self.norm_errp * self.eflux_ref\n\n @property\n def eflux_ul(self):\n \"\"\"Return energy flux (eflux) SED upper limits.\"\"\"\n return self.norm_ul * self.eflux_ref\n", "sub_path": "gammapy/estimators/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 14707, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "astropy.units.Unit", "line_number": 15, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 15, "usage_type": "name"}, {"api_name": "astropy.units.Unit", "line_number": 16, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 16, "usage_type": "name"}, {"api_name": "astropy.units.Unit", "line_number": 17, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 17, "usage_type": "name"}, {"api_name": "astropy.units.Unit", "line_number": 18, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 18, "usage_type": "name"}, {"api_name": "abc.ABC", 
"line_number": 61, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 67, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.errstate", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 122, "usage_type": "call"}, {"api_name": "gammapy.maps.MapAxis.from_energy_edges", "line_number": 129, "usage_type": "call"}, {"api_name": "gammapy.maps.MapAxis", "line_number": 129, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 154, "usage_type": "call"}, {"api_name": "gammapy.modeling.models.Model", "line_number": 157, "usage_type": "argument"}, {"api_name": "inspect.isclass", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 161, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 197, "usage_type": "attribute"}, {"api_name": "gammapy.maps.MapAxis.from_table", "line_number": 200, "usage_type": "call"}, {"api_name": "gammapy.maps.MapAxis", "line_number": 200, "usage_type": "name"}]} +{"seq_id": "262841089", "text": "# -*- coding: utf-8 -*-\nfrom webapp.extensions import db\nfrom flask.ext.script import Command as BaseCommand, prompt_bool\nfrom webapp.models import User, Sale, District, Landscape, Service, Amenity\nfrom random import randint\n\n\nclass Command(BaseCommand):\n \"\"\"create scheme\"\"\"\n\n def run(self):\n import logging\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('sqlalchemy.pool').setLevel(logging.INFO)\n logging.getLogger('sqlalchemy.dialects').setLevel(logging.INFO)\n logging.getLogger('sqlalchemy.orm').setLevel(logging.INFO)\n logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)\n if prompt_bool(\"Recreate all table in db?\"):\n db.drop_all()\n db.create_all()\n user = User(name=u'Admin', email='trent.clainor@gmail.com', password='111111', role='admin')\n db.session.add(user)\n\n user = User(name=u'Thai Star', email='thaistarasia@gmail.com', password='123456', role='admin')\n db.session.add(user)\n\n district = District(name=u'Камала', name_en=u'Kamala')\n db.session.add(district)\n district = District(name=u'Ката', name_en=u'Kata')\n db.session.add(district)\n db.session.flush()\n\n landscape = Landscape(name=u'Море', name_en=u'Sea')\n db.session.add(landscape)\n landscape = Landscape(name=u'Сад', name_en=u'Garden')\n db.session.add(landscape)\n db.session.flush()\n landscape = Landscape(name=u'Холмы', name_en=u'Hills')\n db.session.add(landscape)\n db.session.flush()\n\n service1 = Service(name=u'Уборка', name_en=u'Cleaning')\n db.session.add(service1)\n db.session.flush()\n service2 = Service(name=u'Ухаживание за садом', name_en=u'Gardening')\n db.session.add(service2)\n db.session.flush()\n service3 = Service(name=u'Чистка бассейна', name_en=u'Pool cleaning')\n db.session.add(service3)\n db.session.flush()\n service4 = Service(name=u'Консьерж', name_en=u'Concierge')\n db.session.add(service4)\n db.session.flush()\n service5 = Service(name=u'Повар', name_en=u'Cook')\n db.session.add(service5)\n db.session.flush()\n services = []\n services.append(service1)\n services.append(service4)\n services.append(service5)\n\n amenity1 = Amenity(name=u'Кухня', name_en=u'Kitchen')\n db.session.add(amenity1)\n db.session.flush()\n amenity2 = Amenity(name=u'Wi-Fi Интернет', name_en=u'Wi-Fi Internet')\n 
db.session.add(amenity2)\n db.session.flush()\n amenity3 = Amenity(name=u'Кабельное ТВ', name_en=u'Cable TV')\n db.session.add(amenity3)\n db.session.flush()\n amenity4 = Amenity(name=u'Открытая парковка', name_en=u'Open Parking')\n db.session.add(amenity4)\n db.session.flush()\n amenity5 = Amenity(name=u'Холодильник', name_en=u'Fridge')\n db.session.add(amenity5)\n db.session.flush()\n amenities = []\n amenities.append(amenity1)\n amenities.append(amenity4)\n amenities.append(amenity5)\n\n sale = Sale(\n name=u'Вилла Isolde',\n name_en=u'Villa Isolde',\n sale_type='villa',\n district_id=district.get_id(),\n landscape_id=landscape.get_id(),\n premium=True,\n price='770000',\n currency='thb',\n area=232.0,\n bathrooms=4,\n bedrooms=4,\n land_area=163.0,\n )\n db.session.add(sale)\n for service in services:\n sale.services.append(service)\n for amenity in amenities:\n sale.amenities.append(amenity)\n db.session.commit()\n", "sub_path": "interfaces/scripts/resetdb.py", "file_name": "resetdb.py", "file_ext": "py", "file_size_in_byte": 4175, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.ext.script.Command", "line_number": 8, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.ext.script.prompt_bool", "line_number": 18, "usage_type": "call"}, {"api_name": "webapp.extensions.db.drop_all", "line_number": 19, "usage_type": "call"}, {"api_name": "webapp.extensions.db", "line_number": 19, "usage_type": "name"}, {"api_name": "webapp.extensions.db.create_all", "line_number": 20, "usage_type": "call"}, {"api_name": "webapp.extensions.db", "line_number": 20, "usage_type": "name"}, {"api_name": "webapp.models.User", "line_number": 21, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 22, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 22, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 22, "usage_type": "name"}, {"api_name": "webapp.models.User", "line_number": 24, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 25, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 25, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 25, "usage_type": "name"}, {"api_name": "webapp.models.District", "line_number": 27, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 28, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 28, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 28, "usage_type": "name"}, {"api_name": "webapp.models.District", "line_number": 29, "usage_type": "call"}, {"api_name": 
"webapp.extensions.db.session.add", "line_number": 30, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 30, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 30, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 31, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 31, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 31, "usage_type": "name"}, {"api_name": "webapp.models.Landscape", "line_number": 33, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 34, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 34, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 34, "usage_type": "name"}, {"api_name": "webapp.models.Landscape", "line_number": 35, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 36, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 36, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 36, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 37, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 37, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 37, "usage_type": "name"}, {"api_name": "webapp.models.Landscape", "line_number": 38, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 39, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 39, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 39, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 40, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 40, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 40, "usage_type": "name"}, {"api_name": "webapp.models.Service", "line_number": 42, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 43, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 43, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 43, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 44, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 44, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 44, "usage_type": "name"}, {"api_name": "webapp.models.Service", "line_number": 45, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 46, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 46, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 46, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 47, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 47, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 47, "usage_type": "name"}, {"api_name": "webapp.models.Service", "line_number": 48, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 49, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 49, "usage_type": "attribute"}, 
{"api_name": "webapp.extensions.db", "line_number": 49, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 50, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 50, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 50, "usage_type": "name"}, {"api_name": "webapp.models.Service", "line_number": 51, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 52, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 52, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 52, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 53, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 53, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 53, "usage_type": "name"}, {"api_name": "webapp.models.Service", "line_number": 54, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 55, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 55, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 55, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 56, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 56, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 56, "usage_type": "name"}, {"api_name": "webapp.models.Amenity", "line_number": 62, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 63, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 63, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 63, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 64, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 64, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 64, "usage_type": "name"}, {"api_name": "webapp.models.Amenity", "line_number": 65, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 66, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 66, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 66, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 67, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 67, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 67, "usage_type": "name"}, {"api_name": "webapp.models.Amenity", "line_number": 68, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 69, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 69, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 69, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 70, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 70, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 70, "usage_type": "name"}, {"api_name": "webapp.models.Amenity", "line_number": 71, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 72, "usage_type": "call"}, 
{"api_name": "webapp.extensions.db.session", "line_number": 72, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 72, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 73, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 73, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 73, "usage_type": "name"}, {"api_name": "webapp.models.Amenity", "line_number": 74, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 75, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 75, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 75, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.flush", "line_number": 76, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 76, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 76, "usage_type": "name"}, {"api_name": "webapp.models.Sale", "line_number": 82, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session.add", "line_number": 96, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 96, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 96, "usage_type": "name"}, {"api_name": "webapp.extensions.db.session.commit", "line_number": 101, "usage_type": "call"}, {"api_name": "webapp.extensions.db.session", "line_number": 101, "usage_type": "attribute"}, {"api_name": "webapp.extensions.db", "line_number": 101, "usage_type": "name"}]} +{"seq_id": "151997120", "text": "import unittest,requests \nimport os, sys\nsys.path.append(os.getcwd())\nfrom utils.dbtools import chaxun\nfrom utils.filetools import read\n\n\n@unittest.skip(\"屏蔽类\")\nclass TestCaseLogout(unittest.TestCase):\n\n\n def test_01_logout(self):\n #print(\"hello test!\")\n u =\"http://49.232.185.181:2333/logout\"\n token= read()\n h = {\"Content-Type\":\"application/json\",\"token\":token}\n res= requests.get(url=u, headers=h)\n print(res.text)\n assert res.status_code ==200\n assert res.json()[\"status\"] ==200\n \n", "sub_path": "UnitestTestHttp/case/test_03_logout.py", "file_name": "test_03_logout.py", "file_ext": "py", "file_size_in_byte": 552, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 3, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "attribute"}, {"api_name": "utils.filetools.read", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "unittest.skip", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "463566845", "text": "#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport threading\nimport unittest\n\nfrom nose.plugins.attrib import attr\n\nfrom WMCore.DAOFactory import DAOFactory\nfrom WMCore.DataStructs.Run import Run\nfrom WMCore.Services.Requests import Requests, JSONRequests\nfrom WMCore.WMBS.File import File\nfrom WMCore.WMBS.Fileset import Fileset\nfrom WMCore.WMBS.Job import Job\nfrom WMCore.WMBS.JobGroup import JobGroup\nfrom WMCore.WMBS.Subscription import Subscription\nfrom WMCore.WMBS.Workflow import Workflow\nfrom WMCore.WMFactory import 
WMFactory\nfrom WMQuality.TestInit import TestInit\n\n\nclass WMBSServiceTest(unittest.TestCase):\n def setUp(self):\n \"\"\"\n _setUp_\n\n Set up the database and logging connection. Try to create all of the\n WMBS tables. Also, create some dummy locations.\n\n This doesn't start the server automatically.\n You need to start the server beforehand - make sure to change self.server_url\n if it is not the same as the given one - localhost:8080.\n\n WMCORE/src/python/WMCore/WebTools/Root.py --ini=WMCORE/src/python/WMCore/HTTPFrontEnd/WMBSDefaultConfig.py\n \"\"\"\n self.server_url = 'http://localhost:8081'\n self.testInit = TestInit(__file__)\n self.testInit.setLogging()\n self.testInit.setDatabaseConnection()\n self.testInit.setSchema(customModules=[\"WMCore.WMBS\"],\n useDefault=False)\n\n myThread = threading.currentThread()\n self.daofactory = DAOFactory(package=\"WMCore.WMBS\",\n logger=myThread.logger,\n dbinterface=myThread.dbi)\n\n locationAction = self.daofactory(classname=\"Locations.New\")\n locationAction.execute(siteName=\"test.site.ch\")\n locationAction.execute(siteName=\"base.site.ch\")\n testSubscription, testFileA, testFileB, testFileC = \\\n self.createSubscriptionWithFileABC()\n self.createTestJob(testSubscription, 'TestJob1', testFileA)\n self.createTestJob(testSubscription, 'TestJob2', testFileB)\n self.createTestJob(testSubscription, 'TestJob3', testFileC)\n\n return\n\n def tearDown(self):\n \"\"\"\n _tearDown_\n\n Drop all the WMBS tables.\n \"\"\"\n myThread = threading.currentThread()\n\n factory = WMFactory(\"WMBS\", \"WMCore.WMBS\")\n destroy = factory.loadObject(myThread.dialect + \".Destroy\")\n myThread.transaction.begin()\n destroyworked = destroy.execute(conn=myThread.transaction.conn)\n if not destroyworked:\n raise Exception(\"Could not complete WMBS tear down.\")\n myThread.transaction.commit()\n\n def createSubscriptionWithFileABC(self):\n \"\"\"\n _createSubscriptionWithFileABC_\n\n Create a subscription where the input fileset has three files. 
Also\n create a second subscription that has acquired two of the files.\n \"\"\"\n testWorkflow = Workflow(spec=\"spec.xml\", owner=\"Simon\",\n name=\"wf001\", task=\"Test\")\n testWorkflow.create()\n testWorkflow2 = Workflow(spec=\"specBOGUS.xml\", owner=\"Simon\",\n name=\"wfBOGUS\", task=\"Test\")\n testWorkflow2.create()\n\n testFileA = File(lfn=\"/this/is/a/lfnA\", size=1024, events=20,\n locations=set([\"test.site.ch\"]))\n testFileA.addRun(Run(1, *[45]))\n\n testFileB = File(lfn=\"/this/is/a/lfnB\", size=1024, events=20,\n locations=set([\"test.site.ch\"]))\n testFileB.addRun(Run(1, *[46]))\n\n testFileC = File(lfn=\"/this/is/a/lfnC\", size=1024, events=20,\n locations=set([\"test.site.ch\"]))\n testFileC.addRun(Run(2, *[48]))\n\n testFileA.create()\n testFileB.create()\n testFileC.create()\n\n testFileset = Fileset(name=\"TestFileset\")\n testFileset.create()\n\n testFileset.addFile(testFileA)\n testFileset.addFile(testFileB)\n testFileset.addFile(testFileC)\n testFileset.commit()\n\n testSubscription = Subscription(fileset=testFileset,\n workflow=testWorkflow)\n testSubscription.create()\n testSubscription2 = Subscription(fileset=testFileset,\n workflow=testWorkflow2)\n testSubscription2.create()\n testSubscription2.acquireFiles([testFileA, testFileB])\n\n # return (testSubscription, testFileset, testWorkflow, testFileA,\n # testFileB, testFileC)\n\n return (testSubscription, testFileA, testFileB, testFileC)\n\n def createTestJob(self, testSubscription, jobName, *testFiles):\n \"\"\"\n _createTestJob_\n\n Create a test job with two files as input. This will also create the\n appropriate workflow, jobgroup and subscription.\n \"\"\"\n\n testJobGroup = JobGroup(subscription=testSubscription)\n testJobGroup.create()\n\n testFiles = list(testFiles)\n testJob = Job(name=jobName, files=testFiles)\n testJob[\"couch_record\"] = \"somecouchrecord\"\n testJob[\"location\"] = \"test.site.ch\"\n testJob.create(group=testJobGroup)\n\n def wmbsServiceSetup(self, argstring, kargs={}, returnType='text'):\n\n if returnType == 'json':\n request = JSONRequests(self.server_url)\n else:\n request = Requests(self.server_url)\n results = request.get(\"/wmbs/%s/\" % argstring, kargs)\n\n return results\n\n @attr('integration')\n def testAllMethods(self):\n pass\n\n @attr('integration')\n def testJobs(self):\n print(\"\\nTesting jobs service: Should return all the job id and state of jobs\")\n print(self.wmbsServiceSetup('jobs'))\n\n @attr('integration')\n def testJobCount(self):\n print(\"\\nTesting job count service: Should return the job count by and state of jobs\")\n\n print(self.wmbsServiceSetup('jobcount'))\n\n @attr('integration')\n def testJobsBySubs(self):\n print(\"\\nTesting jobsbysubs service: Should return the jobs by given fileset and workflow and specified time\")\n param = {\"fileset_name\": 'TestFileset', 'workflow_name': 'wf001', 'state_time': 0}\n print(self.wmbsServiceSetup('jobsbysubs', param))\n\n @attr('integration')\n def testJobCountBySubsAndRun(self):\n print(\"\\nTesting jobcountbysubs service: Should return the job count by given subscription and run\")\n param = {\"fileset_name\": 'TestFileset', 'workflow_name': 'wf001', 'run': 1}\n print(self.wmbsServiceSetup('jobcountbysubs', param))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "sub_path": "test/python/WMCore_t/HTTPFrontEnd_t/WMBS_t/WMBS_t.py", "file_name": "WMBS_t.py", "file_ext": "py", "file_size_in_byte": 6665, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": 
"55", "api": [{"api_name": "unittest.TestCase", "line_number": 23, "usage_type": "attribute"}, {"api_name": "WMQuality.TestInit.TestInit", "line_number": 38, "usage_type": "call"}, {"api_name": "threading.currentThread", "line_number": 44, "usage_type": "call"}, {"api_name": "WMCore.DAOFactory.DAOFactory", "line_number": 45, "usage_type": "call"}, {"api_name": "threading.currentThread", "line_number": 66, "usage_type": "call"}, {"api_name": "WMCore.WMFactory.WMFactory", "line_number": 68, "usage_type": "call"}, {"api_name": "WMCore.WMBS.Workflow.Workflow", "line_number": 83, "usage_type": "call"}, {"api_name": "WMCore.WMBS.Workflow.Workflow", "line_number": 86, "usage_type": "call"}, {"api_name": "WMCore.WMBS.File.File", "line_number": 90, "usage_type": "call"}, {"api_name": "WMCore.DataStructs.Run.Run", "line_number": 92, "usage_type": "call"}, {"api_name": "WMCore.WMBS.File.File", "line_number": 94, "usage_type": "call"}, {"api_name": "WMCore.DataStructs.Run.Run", "line_number": 96, "usage_type": "call"}, {"api_name": "WMCore.WMBS.File.File", "line_number": 98, "usage_type": "call"}, {"api_name": "WMCore.DataStructs.Run.Run", "line_number": 100, "usage_type": "call"}, {"api_name": "WMCore.WMBS.Fileset.Fileset", "line_number": 106, "usage_type": "call"}, {"api_name": "WMCore.WMBS.Subscription.Subscription", "line_number": 114, "usage_type": "call"}, {"api_name": "WMCore.WMBS.Subscription.Subscription", "line_number": 117, "usage_type": "call"}, {"api_name": "WMCore.WMBS.JobGroup.JobGroup", "line_number": 135, "usage_type": "call"}, {"api_name": "WMCore.WMBS.Job.Job", "line_number": 139, "usage_type": "call"}, {"api_name": "WMCore.Services.Requests.JSONRequests", "line_number": 147, "usage_type": "call"}, {"api_name": "WMCore.Services.Requests.Requests", "line_number": 149, "usage_type": "call"}, {"api_name": "nose.plugins.attrib.attr", "line_number": 154, "usage_type": "call"}, {"api_name": "nose.plugins.attrib.attr", "line_number": 158, "usage_type": "call"}, {"api_name": "nose.plugins.attrib.attr", "line_number": 163, "usage_type": "call"}, {"api_name": "nose.plugins.attrib.attr", "line_number": 169, "usage_type": "call"}, {"api_name": "nose.plugins.attrib.attr", "line_number": 175, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 183, "usage_type": "call"}]} +{"seq_id": "489727766", "text": "import csv\nfrom collections import defaultdict\nimport tfkit.utility.tok as tok\n\n\ndef get_data_from_file(fpath):\n tasks = defaultdict(list)\n task = 'default'\n with open(fpath, 'r', encoding='utf8', newline='') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n context, start, end = row\n yield tasks, task, context, [start, end]\n\n\ndef preprocessing_data(item, tokenizer, maxlen=512, handle_exceed='slide', **kwargs):\n tasks, task, input, target = item\n param_dict = {'input': input, 'tokenizer': tokenizer, 'target': target, 'maxlen': maxlen,\n 'handle_exceed': handle_exceed}\n yield get_feature_from_data, param_dict\n\n\ndef get_feature_from_data(tokenizer, input, target=None, maxlen=512, handle_exceed='slide', **kwargs):\n feature_dict_list = []\n\n mapping_index = []\n pos = 1 # cls as start 0\n input_text_list = input.split(\" \")\n for i in input_text_list:\n for _ in range(len(tokenizer.tokenize(i))):\n if _ < 1:\n mapping_index.append({'char': i, 'pos': pos})\n pos += 1\n\n t_input_list, t_pos_list = tok.handle_exceed(tokenizer, input, maxlen - 2, mode=handle_exceed)\n for t_input, t_pos in zip(t_input_list, t_pos_list): # -2 for cls and sep:\n 
row_dict = dict()\n row_dict['target'] = [0, 0]\n tokenized_input = [tok.tok_begin(tokenizer)] + t_input + [tok.tok_sep(tokenizer)]\n input_id = tokenizer.convert_tokens_to_ids(tokenized_input)\n if target is not None:\n start, end = target\n ori_start = start = int(start)\n ori_end = end = int(end)\n ori_ans = input_text_list[ori_start:ori_end]\n start -= t_pos[0]\n end -= t_pos[0]\n if mapping_index[start]['pos'] > ori_end or start < 0 or start > maxlen or end >= maxlen - 2:\n start = 0\n end = 0\n else:\n for map_pos, map_tok in enumerate(mapping_index[t_pos[0]:]):\n if t_pos[0] < map_tok['pos'] <= t_pos[1]:\n length = len(tokenizer.tokenize(map_tok['char']))\n if map_pos < ori_start:\n start += length - 1\n if map_pos < ori_end:\n end += length - 1\n if ori_ans != tokenized_input[start + 1:end + 1] and tokenizer.tokenize(\n \" \".join(ori_ans)) != tokenized_input[start + 1:end + 1] and start != end != 0:\n continue\n row_dict['target'] = [start + 1, end + 1] # cls +1\n\n mask_id = [1] * len(input_id)\n mask_id.extend([0] * (maxlen - len(mask_id)))\n row_dict['mask'] = mask_id\n input_id.extend([0] * (maxlen - len(input_id)))\n row_dict['input'] = input_id\n row_dict['raw_input'] = tokenized_input\n feature_dict_list.append(row_dict)\n\n return feature_dict_list\n", "sub_path": "tfkit/model/qa/dataloader.py", "file_name": "dataloader.py", "file_ext": "py", "file_size_in_byte": 2960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "collections.defaultdict", "line_number": 7, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 10, "usage_type": "call"}, {"api_name": "tfkit.utility.tok.handle_exceed", "line_number": 35, "usage_type": "call"}, {"api_name": "tfkit.utility.tok", "line_number": 35, "usage_type": "name"}, {"api_name": "tfkit.utility.tok.tok_begin", "line_number": 39, "usage_type": "call"}, {"api_name": "tfkit.utility.tok", "line_number": 39, "usage_type": "name"}, {"api_name": "tfkit.utility.tok.tok_sep", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "230897984", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.http import HtmlResponse\nfrom jobparser.items import JobparserItem\n\nclass SjruSpider(scrapy.Spider):\n name = 'sjru'\n allowed_domains = ['superjob.ru']\n\n def __init__(self, text):\n self.start_urls = [f'https://www.superjob.ru/vacancy/search/?keywords={text}']\n\n def parse(self, response:HtmlResponse):\n vacancy_links = response.xpath(\"//div[@class='_3mfro CuJz5 PlM3e _2JVkc _3LJqf']/a/@href\").extract()\n for link in vacancy_links:\n yield response.follow(link, callback=self.vacancy_parse)\n\n next_page = response.xpath(\"//a[@class='icMQ_ _1_Cht _3ze9n f-test-button-dalshe f-test-link-Dalshe']/@href\").extract()\n yield response.follow(next_page, callback=self.parse)\n\n def vacancy_parse(self, response: HtmlResponse):\n name = response.xpath(\"//h1[@class='_3mfro rFbjy s1nFK _2JVkc']/text()\").extract()\n salary = response.xpath(\"//span[@class='_3mfro _2Wp8I ZON4b PlM3e _2JVkc']/text()\").extract()\n company = response.xpath(\"//h2[@class='_3mfro PlM3e _2JVkc _2VHxz _3LJqf _15msI']/text()\").extract()\n city = response.xpath(\"//span[@class='_3mfro _1hP6a _2JVkc']/text()\").extract_first()\n link = response.url\n source = self.allowed_domains[0]\n yield JobparserItem(name=name, salary=salary, company=company, link=link, source=source, city=city)\n\n\n", "sub_path": "Lesson 5/spiders/sjru.py", "file_name": "sjru.py", "file_ext": "py", 
"file_size_in_byte": 1403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "scrapy.Spider", "line_number": 6, "usage_type": "attribute"}, {"api_name": "scrapy.http.HtmlResponse", "line_number": 13, "usage_type": "name"}, {"api_name": "scrapy.http.HtmlResponse", "line_number": 21, "usage_type": "name"}, {"api_name": "jobparser.items.JobparserItem", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "94216442", "text": "# -*- coding: utf-8 -*-\nfrom zipfile import ZipFile\n\nimport requests\n\n\ndef add_reciprocal_relation(relation2id, triplets, prefix='inv@'):\n num_relation = len(relation2id)\n relation2id_all = {prefix + k: v + num_relation for k, v in relation2id.items()}\n relation2id_all.update(relation2id)\n\n triplets_all = []\n for data in triplets:\n reverse_data = []\n for _1, _2, _3 in data:\n reverse_data.append((_3, prefix + _2, _1))\n\n data += reverse_data\n triplets_all.append(data)\n\n print(f\"Adding reciprocal relations\\n\"\n f\" number of relations: {len(relation2id_all)}\\n\")\n\n return relation2id_all, triplets_all\n\n\ndef read_zip(path: str, mode='hrt'):\n data = {'train.tsv': [], 'valid.tsv': [], 'test.tsv': []}\n\n entity2id = {}\n relation2id = {}\n with ZipFile(path, 'r') as zp:\n for file in data:\n with zp.open(file) as fp:\n _, _, triplets = read_tsv(fp, entity2id, relation2id, mode)\n data[file].extend(triplets)\n\n return entity2id, relation2id, data['train.tsv'], data['valid.tsv'], data['test.tsv']\n\n\ndef read_tsv(fp, entity2id: dict = None, relation2id: dict = None, mode='hrt'):\n assert mode in ('hrt', 'rht', 'htr')\n entity2id = {} if entity2id is None else entity2id\n relation2id = {} if relation2id is None else relation2id\n triplets = []\n\n while True:\n\n line = fp.readline()\n\n if isinstance(line, bytes):\n line = line.decode()\n\n line = line.strip()\n\n if not line:\n break\n\n _1, _2, _3 = line.split('\\t')\n\n # default 'hrt'\n if mode == 'rht':\n _1, _2, _3 = _2, _1, _3\n elif mode == 'htr':\n _1, _2, _3 = _1, _3, _2\n\n if _1 not in entity2id:\n entity2id[_1] = len(entity2id)\n\n if _2 not in relation2id:\n relation2id[_2] = len(relation2id)\n\n if _3 not in entity2id:\n entity2id[_3] = len(entity2id)\n\n triplets.append((_1, _2, _3))\n\n return entity2id, relation2id, triplets\n\n\nURL = \"https://docs.google.com/uc?export=download\"\n\n\ndef download_from_google_drive(id, destination):\n print(\"Downloading from google drive. 
Trying to fetch {}\".format(destination))\n\n def get_confirm_token(response):\n for key, value in response.cookies.items():\n if key.startswith('download_warning'):\n return value\n\n return None\n\n def save_response_content(response, destination):\n CHUNK_SIZE = 32768\n\n with open(destination, \"wb\") as f:\n for chunk in response.iter_content(CHUNK_SIZE):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n\n session = requests.Session()\n\n response = session.get(URL, params={'id': id}, stream=True)\n token = get_confirm_token(response)\n\n if token:\n params = {'id': id, 'confirm': token}\n response = session.get(URL, params=params, stream=True)\n\n save_response_content(response, destination)\n", "sub_path": "Relational_Link_Prediction/Knowledge_Embedding/package/dataset/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 3051, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "zipfile.ZipFile", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "557211672", "text": "import json\nimport pymysql\n\n\n# 获取城市数据\ndef get_city_data():\n\n db = pymysql.Connect(host='localhost', port=3306, user='root', password='root', database='tpp', charset='utf8')\n cursor = db.cursor()\n\n with open('citys.json', encoding='GBK') as fp:\n city = json.load(fp)\n returnValue = city.get('returnValue')\n # print(returnValue)\n\n # 事务开启\n db.begin()\n\n keys = returnValue.keys() # 字母\n # 遍历所有字母\n for key in keys:\n city_list = returnValue.get(key)\n # print(city_list)\n\n # 插入字母数据\n # cursor.execute('insert into city_letter(letter) values(\"%s\")' % key)\n\n # 遍历每个字母下的所有城市\n for city_dict in city_list:\n id = city_dict.get('id')\n parentId = city_dict.get('parentId')\n regionName = city_dict.get('regionName')\n cityCode = city_dict.get('cityCode')\n pinYin = city_dict.get('pinYin')\n\n # 插入城市数据\n cursor.execute('select * from city_letter where letter=\"%s\"' % key)\n result = cursor.fetchone()\n # print(result) # (22, 'H')\n letter_id = result[0]\n\n # cursor.execute('insert into city(id, parentId, regionName, cityCode, pinYin, letter) '\n # 'values(%d, %d, \"%s\", %d, \"%s\", %d)' % (id, parentId, regionName, cityCode, pinYin, letter_id))\n\n\n # 提交事务\n db.commit()\n\n cursor.close()\n db.close()\n\n\nif __name__ == \"__main__\":\n get_city_data()\n\n", "sub_path": "code/TPP/resources/citys.py", "file_name": "citys.py", "file_ext": "py", "file_size_in_byte": 1662, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pymysql.Connect", "line_number": 8, "usage_type": "call"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "205839062", "text": "from pathlib import Path\nimport re\n\n\nposts = []\nfor directory in Path(\"assets\").iterdir():\n if directory.is_dir() and re.match(r\"\\d{4}-\\d{2}-\\d{2}-\", directory.name):\n post = Path(\"_posts\", directory.name).with_suffix(\".md\")\n if not post.is_file():\n raise ValueError(f\"missing post: {post}\")\n posts.append(post)\n\nprint(f\"discovered {len(posts)} asset directories for posts\")\n", "sub_path": ".github/workflows/verify-asset-directories.py", "file_name": "verify-asset-directories.py", "file_ext": "py", "file_size_in_byte": 410, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": 
"pathlib.Path", "line_number": 6, "usage_type": "call"}, {"api_name": "re.match", "line_number": 7, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "618084750", "text": "#coding=utf-8\n\nimport dbus\nimport gobject\nimport re\nfrom dbus.mainloop.glib import DBusGMainLoop\nfrom multiprocessing import Process, JoinableQueue\nfrom Queue import Empty\nfrom bottle import route, run\nimport logging\nimport webif\nimport ConfigParser\n\nINTERESTING_DEVICES = ['00_0C_8A_46_DC_52']\n\n#we need an configuration interface to bypass stuff properly when problems arise\npa_system_mode = True\npa_dbus_socket_path = \"unix:path=/run/pulse/dbus-socket\"\n\nclass DBusClient:\n\n \"\"\" list of tracked devices read from config file \"\"\"\n bt_dev_list = []\n \n def __init__(self):\n \n DBusGMainLoop(set_as_default=True)\n \n #acquire pulseaudio dbus server and proxy objects\n self.sys_bus = dbus.SystemBus()\n\n if pa_system_mode:\n pa_src_bus = self.sys_bus\n else:\n sess_bus = dbus.SessionBus()\n pa_src_bus = sess_bus\n\n try:\n server_lookup = pa_src_bus.get_object(\"org.PulseAudio1\", \"/org/pulseaudio/server_lookup1\")\n address = server_lookup.Get(\"org.PulseAudio.ServerLookup1\", \"Address\", dbus_interface=\"org.freedesktop.DBus.Properties\")\n except:\n #problem getting server lookup, fall back to manual path\n address = pa_dbus_socket_path\n\n self.pabus = dbus.connection.Connection(address)\n self.pacore = self.pabus.get_object(object_path=\"/org/pulseaudio/core1\")\n\n #create master null sink\n master_sink_path = self.pacore.LoadModule('module-null-sink',{'sink_name' : 'master', 'sink_properties' : 'device.description=MasterOutput'})\n self.master_sink = self.pabus.get_object(object_path=master_sink_path)\n logging.info(\"Registered master sink, module %s\",master_sink_path)\n \n #register interesting signals\n self.pacore.ListenForSignal('org.PulseAudio.Core1.NewSink', dbus.Array(signature='o'))\n self.pacore.ListenForSignal('org.PulseAudio.Core1.SinkRemoved', dbus.Array(signature='o'))\n self.pacore.connect_to_signal('NewSink', self.new_sink)\n self.pacore.connect_to_signal('SinkRemoved', self.del_sink)\n \n #get bluez objects - bluez 5\n bluez_main = self.sys_bus.get_object(\"org.bluez\", \"/\")\n bluez_objects = bluez_main.GetManagedObjects(dbus_interface=\"org.freedesktop.DBus.ObjectManager\")\n\n #figure out adapter path and devices\n bluez_adapter_list = []\n bluez_dev_list = []\n for path in bluez_objects.keys():\n if re.match(r\"/org/bluez/hci[0-9]+\",path) != None:\n #this is an adapter\n bluez_adapter_list.append(path)\n \n if re.match(r\"/org/bluez/hci[0-9]+/dev_[A-F0-9_]+\",path) != None:\n #this is a device\n bluez_dev_list.append(path)\n \n #create list of associated devices (paired?)\n self.bluez_dev_macs = dict([ (re.match(r\"/org/bluez/hci[0-9]/dev_([0-9A-F_]+)\",path).group(1),path) for path in bluez_dev_list ])\n\n def register_bt_devices(self):\n #see if interesting devices exist\n for mac_address in self.bluez_dev_macs.keys():\n if mac_address in self.bt_dev_list:\n bluez_device = self.sys_bus.get_object(\"org.bluez\", self.bluez_dev_macs[mac_address])\n #get notified of device property changes\n bluez_device.connect_to_signal(\"PropertiesChanged\", self.bluez_dev_change, dbus_interface=\"org.freedesktop.DBus.Properties\")\n \n def new_sink(self,sink_path):\n #get proxy\n sink = self.pabus.get_object(object_path=sink_path)\n\n logging.info(\"New PA sink registered: %s\", sink.Get(\"org.PulseAudio.Core1.Device\", 
\"Name\", dbus_interface=\"org.freedesktop.DBus.Properties\"))\n\n def del_sink(self,sink_path):\n #print \"del_sink\"\n logging.info(\"PA sink removed\")\n \n \n def bluez_dev_change(self,*args,**kwargs):\n logging.debug(\"got bluez PropertiesChanged\")\n\n def get_out(self):\n logging.info(\"Unloading modules\")\n self.master_sink.Unload()\n\n def apply_config(self,config):\n self.bt_dev_list = []\n\n if 'bt_devices' in config.sections():\n for cat, mac in config.items('bt_devices'):\n if cat == 'track':\n self.bt_dev_list.append(mac)\n else:\n #don't really care\n pass\n \n self.register_bt_devices()\n\ndef ManageDBus(conn,config):\n \n try:\n #if creating the object fails we would be trapped\n client = DBusClient()\n client.apply_config(config)\n except:\n raise IOError(\"Failed to setup dbus communications\")\n \n loop = gobject.MainLoop()\n\n def web_iface_comm():\n \n try:\n data = conn.get_nowait()\n conn.task_done()\n except Empty:\n pass\n else:\n logging.debug(\"got_data = %s\", data)\n if data == 'test':\n conn.put('tested ok')\n elif data == 'quit':\n loop.quit()\n\n return True\n\n #interprocess communication handler\n gobject.idle_add(web_iface_comm)\n loop.run()\n \n #cleanup and exit\n client.get_out()\n\n\nif __name__ == \"__main__\":\n \n\n #setup logging\n logging.basicConfig(filename='test.log',level=logging.DEBUG,filemode='w')\n \n #load configurations\n config = ConfigParser.ConfigParser()\n config.read('default.conf')\n\n #horrible decoupling, read other configurations directly here\n bottle_bind_to = config.get('webif', 'bind_to')\n bottle_port = int(config.get('webif', 'port'))\n bottle_enabled = bool(config.get('webif', 'enabled'))\n\n #create processes \n logging.debug(\"Starting\")\n\n if bottle_enabled:\n\n bottle_loop = Process(target=run,kwargs={'app': webif.app, 'host' : bottle_bind_to,'port' : bottle_port})\n \n bottle_loop.start()\n\n logging.debug(\"Started Bottle process\")\n else:\n logging.debug(\"Web interface not enabled\")\n\n try:\n ManageDBus(webif.comm_queue,config)\n except:\n logging.error(\"Failed to set-up dbus communications.\")\n\n logging.debug(\"Shutting down Bottle\")\n\n bottle_loop.terminate()\n\n logging.debug(\"Done\")\n", "sub_path": "dbusag.py", "file_name": "dbusag.py", "file_ext": "py", "file_size_in_byte": 6275, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "dbus.mainloop.glib.DBusGMainLoop", "line_number": 27, "usage_type": "call"}, {"api_name": "dbus.SystemBus", "line_number": 30, "usage_type": "call"}, {"api_name": "dbus.SessionBus", "line_number": 35, "usage_type": "call"}, {"api_name": "dbus.connection.Connection", "line_number": 45, "usage_type": "call"}, {"api_name": "dbus.connection", "line_number": 45, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 51, "usage_type": "call"}, {"api_name": "dbus.Array", "line_number": 54, "usage_type": "call"}, {"api_name": "dbus.Array", "line_number": 55, "usage_type": "call"}, {"api_name": "re.match", "line_number": 67, "usage_type": "call"}, {"api_name": "re.match", "line_number": 71, "usage_type": "call"}, {"api_name": "re.match", "line_number": 76, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 94, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 98, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 101, "usage_type": "call"}, {"api_name": 
"gobject.MainLoop", "line_number": 126, "usage_type": "call"}, {"api_name": "Queue.Empty", "line_number": 133, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 136, "usage_type": "call"}, {"api_name": "gobject.idle_add", "line_number": 145, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 156, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 156, "usage_type": "attribute"}, {"api_name": "ConfigParser.ConfigParser", "line_number": 159, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 168, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 172, "usage_type": "call"}, {"api_name": "bottle.run", "line_number": 172, "usage_type": "name"}, {"api_name": "webif.app", "line_number": 172, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 176, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 178, "usage_type": "call"}, {"api_name": "webif.comm_queue", "line_number": 181, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 183, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 185, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 189, "usage_type": "call"}]} +{"seq_id": "358337161", "text": "#!/usr/bin/python\n'''\nSimple client app for chat. No security yet.\n'''\n\nimport sys\nimport socket\nfrom time import sleep\nimport threading\nimport curses\n\nMAX_MSG_LEN= 1024\nMAX_CONN_RETRIES= 3\n\n# Global variable indiating connection status (to server obviously)\nconnected= False\n\n# Global variables for curses screen\nwindow= None\nwindow_h= 0\nwindow_w= 0\n\ndef main():\n\tif len(sys.argv)==3:\n\t\tHOST= sys.argv[1]\n\t\tPORT= int(sys.argv[2])\n\telif len(sys.argv)==2:\n\t\tHOST= sys.argv[1]\n\t\tPORT= 10001\n\telse:\n\t\tHOST= \"127.0.0.1\"\n\t\tPORT= 10001\n\n\ts= socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tglobal connected\n\t\n\tfor i in range(1, MAX_CONN_RETRIES+1):\n\t\ttry:\n\t\t\ts.connect((HOST, PORT))\n\t\t\ts.settimeout(2)\n\t\t\tconnected= True\n\t\t\tbreak\n\t\texcept:\n\t\t\tprint(\"Cannot connect to server {srv_addr}. Attempt {attempt_no}\".format(srv_addr=HOST, attempt_no=i))\n\t\telse:\n\t\t\tprint(\"Connected to server {srv_addr}\".format(srv_addr=HOST))\n\t\t\n\t\tsleep(5) # Sleep for 5 sec.\n\t\t\t\n\t# Check if connected to server\n\tif connected != True:\n\t\tprint(\"Connecting to server failed. 
Exiting\")\n\t\texit(1) # End of executing!\n\t\t\n\tth3= threading.Thread(target=task_display)\n\tth3.start()\n\t\n\tth1= threading.Thread(None, task_read_input, None, ('#', s))\n\tth1.start()\n\t\n\tth2= threading.Thread(None, task_recv_msg, None, (s,))\n\tth2.start()\n\t\n\treturn\t\t\t\t\n# end of main\n\ndef task_display():\n\tglobal window\n\tglobal window_h\n\tglobal window_w\n\n\ttry:\n\t\twindow= curses.initscr()\n\t\twindow_h, window_w= window.getmaxyx() # Check currect window size\n\t\t\t\n\t\ti= 0\n\t\twhile True:\n\t\t\twindow.refresh()\n\t\t\tsleep(0.1)\n\texcept:\n\t\tpass\n\tfinally:\n\t\tconnected= False\n\t\tcurses.endwin()\n\n\treturn\n\ndef task_read_input(prompt, s):\n\tglobal connected\n\tglobal window\n\t\n\twhile connected:\n\t\twindow.addstr(window_h-1, 0, prompt)\n\t\twindow.move(window_h-1, len(prompt)+ 1)\n\t\tDATA= window.getstr()\n\t\twindow.insertln()\n\t\t\n\t\tDATA= DATA.strip()\n\t\t\n\t\tif DATA == \"\" or DATA == None:\n\t\t\tcontinue\n\t\telif len(DATA) > MAX_MSG_LEN:\n\t\t\tprint(\"***\\nERROR: Message is too long\\n***\")\n\t\t\tcontinue\n\t\t\n\t\tsend_msg(DATA, s)\t\n\t\t\n\t\tif DATA.upper() == \"EXIT\":\n\t\t\tsleep(3) # Slepp for 3 sec to allow clean up communication\n\t\t\ts.close()\n\t\t\tconnected= False\n\t#print(\"Returning from {0} thread\". format(task_read_input))\n\treturn\n# end of read_input\n\ndef task_recv_msg(sock):\n\tglobal connected\n\tglobal window\n\t\n\tcurrent_line=0\n\t\n\twhile connected:\n\t\ttry:\n\t\t\tr_data= sock.recv(MAX_MSG_LEN)\n\t\texcept socket.timeout:\n\t\t\tcontinue\n\t\texcept:\n\t\t\tprint(\"Cannot receive data\")\n\t\t\tcontinue\n\t\telse:\n\t\t\t# Print message to screen and get currect cursor position\n\t\t\tcurrent_h, currect_w= window.getyx()\n\t\t\twindow.addstr(current_line, 0, r_data.decode())\n\n\t\t\t# Check if current line not exceeded window size\n\t\t\tif current_line >= window_h-3:\n\t\t\t\twindow.scroll()\n\t\t\telse:\n\t\t\t\tcurrent_line+= 1\n\t\t\t\t\n\n\t\t\t# Move coursor back to input line\n\t\t\twindow.move(current_h, currect_w)\n\t\t\t\n\t\n\t#print(\"Returning from {0} thread\". 
format(task_recv_msg))\n\treturn\n# end of task_recv_msg\n\ndef send_msg(msg_str, sock):\n\ttry:\n\t\tsock.send(msg_str.encode())\n\texcept:\n\t\tprint(\"Cannot send data\")\n# end of send_msg\n\n# Start program\nmain()\n", "sub_path": "chat_client.py", "file_name": "chat_client.py", "file_ext": "py", "file_size_in_byte": 3117, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.argv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 34, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 34, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 34, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 55, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 58, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 61, "usage_type": "call"}, {"api_name": "curses.initscr", "line_number": 73, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "curses.endwin", "line_number": 84, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 109, "usage_type": "call"}, {"api_name": "socket.timeout", "line_number": 125, "usage_type": "attribute"}]} +{"seq_id": "212019691", "text": "import maml_rl.envs\nimport gym\nimport numpy as np\nimport torch\nimport json\nimport os\n\nfrom maml_rl.metalearner import MetaLearner\nfrom maml_rl.policies import CategoricalMLPPolicy, NormalMLPPolicy\nfrom maml_rl.baseline import LinearFeatureBaseline\nfrom maml_rl.sampler import BatchSampler\n\nfrom tensorboardX import SummaryWriter\n\n# changed aggregate from mean to sum for the armed-bandit problem.\ndef total_rewards(episodes_rewards, aggregation=torch.mean):\n    rewards = torch.mean(torch.stack([aggregation(torch.sum(rewards, dim=0))\n        for rewards in episodes_rewards], dim=0))\n    return rewards.item()\n\nclass args:\n    def __init__(self):\n        self.env_name = 'Bandit-K5-v0'\n        self.num_workers = 8\n        self.fast_lr = 0.3\n        self.max_kl=0.1\n        self.fast_batch_size=10 #number of episodes\n        self.meta_batch_size = 40 #number of tasks\n        self.num_layers = 2\n        self.hidden_size = 100\n        self.num_batches=2 #100. 
Number of iterations\n self.output_folder = 'maml-mab-dir'\n self.gamma = 0.99\n self.tau = 1.0\n self.cg_damping = 1e-5\n self.ls_max_step= 15\n self.device = 'cpu'\n self.first_order = False\n self.cg_iters = 10\n self.ls_max_steps = 10\n self.ls_backtrack_ratio = 0.5\nargs = args()\n\nbatch = 800\nsave_folder = './saves/{0}'.format(args.output_folder)\n\n\nprint(batch)\n\nsampler = BatchSampler(args.env_name, batch_size=args.fast_batch_size,\n num_workers=args.num_workers)\n\nthe_model = CategoricalMLPPolicy(\n int(np.prod(sampler.envs.observation_space.shape)),\n sampler.envs.action_space.n,\n hidden_sizes=(args.hidden_size,) * args.num_layers)\n\nthe_model.load_state_dict(torch.load(os.path.join(save_folder,\n 'policy-{0}.pt'.format(batch))))\n\nbaseline = LinearFeatureBaseline(\n int(np.prod(sampler.envs.observation_space.shape)))\n\nmetalearner = MetaLearner(sampler, the_model, baseline, gamma=args.gamma,\n fast_lr=args.fast_lr, tau=args.tau, device=args.device)\n\ntest_batch_size = 2\ntest_reward_before =[]\ntest_reward_after =[]\n\nfor test_batch in range(test_batch_size):\n #sample one task\n test_task = sampler.sample_tasks(num_tasks=1)\n print(\"test_task: \",test_task)\n sampler.reset_task(test_task[0])\n\n #sample some episodes for that task\n episodes = metalearner.sample(test_task, first_order=args.first_order)\n test_reward_before.append(total_rewards([ep.rewards for ep, _ in episodes]))\n test_reward_after.append(total_rewards([ep.rewards for _, ep in episodes]))\n\nprint(\"before:\",test_reward_before,\"; after: \",test_reward_after,\"\\n\")\nprint(\"before average: \",np.mean(test_reward_before),\n \"after average: \",np.mean(test_reward_after))\n", "sub_path": "play_mab.py", "file_name": "play_mab.py", "file_ext": "py", "file_size_in_byte": 2720, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "torch.mean", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.mean", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 17, "usage_type": "call"}, {"api_name": "maml_rl.sampler.BatchSampler", "line_number": 50, "usage_type": "call"}, {"api_name": "maml_rl.policies.CategoricalMLPPolicy", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "maml_rl.baseline.LinearFeatureBaseline", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 62, "usage_type": "call"}, {"api_name": "maml_rl.metalearner.MetaLearner", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "374497166", "text": "from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nfrom ui.Ui_Splash import *\nfrom ui.Ui_Login import *\nfrom ui.Ui_ManagementSystem import *\nfrom ui.Ui_MessageDialog import *\nfrom ui.Ui_UseTimeDialog import *\nfrom utils.ServerDB import *\n\nfrom imageResource.svg import *\n\nimport sys\nimport os\nimport datetime\nimport numpy\nimport requests\nimport json\n\n# UseTimeDialog\n\n\nclass UseTimeDialog(QDialog):\n def 
__init__(self, data):\n super(UseTimeDialog, self).__init__()\n self.data = data\n self.init()\n self.loading()\n\n def init(self):\n self.initUI()\n self.initInteraction()\n\n def initUI(self):\n self.ui = Ui_UseTimeDialog()\n self.ui.setupUi(self)\n\n def initInteraction(self):\n self.ui.close.clicked.connect(self.reject)\n\n def loading(self):\n self.loadingData()\n\n def loadingData(self):\n for i in range(0, len(self.data)):\n horizentalLayout = QHBoxLayout()\n stuID = QLabel(self.data[i][0])\n stuID.setFont(QFont('微軟正黑體', 10))\n stuID.setAlignment(QtCore.Qt.AlignCenter)\n horizentalLayout.addWidget(stuID)\n seatNum = QLabel(self.data[i][1])\n seatNum.setFont(QFont('微軟正黑體', 10))\n seatNum.setAlignment(QtCore.Qt.AlignCenter)\n horizentalLayout.addWidget(seatNum)\n useTime = QLabel(self.data[i][2])\n useTime.setFont(QFont('微軟正黑體', 10))\n useTime.setAlignment(QtCore.Qt.AlignCenter)\n horizentalLayout.addWidget(useTime)\n self.ui.usetimeareaContentLayout.addLayout(horizentalLayout)\n\n # 重載方法\n def mousePressEvent(self, event):\n self.pressX = event.x()\n self.pressY = event.y()\n\n def mouseMoveEvent(self, event):\n x = event.x()\n y = event.y()\n moveX = x-self.pressX\n moveY = y-self.pressY\n positionX = self.frameGeometry().x() + moveX\n positionY = self.frameGeometry().y() + moveY\n self.move(positionX, positionY)\n\n# Dialog\n\n\nclass MessageDialog(QDialog):\n def __init__(self):\n super(MessageDialog, self).__init__()\n self.init()\n\n def init(self):\n self.initUI()\n self.initInteraction()\n\n def initUI(self):\n self.ui = Ui_MessageDialog()\n self.ui.setupUi(self)\n self.setWindowTitle('提示訊息')\n\n def initInteraction(self):\n self.ui.resultComfirm.clicked.connect(self.accept)\n self.ui.resultCancel.clicked.connect(self.reject)\n self.ui.close.clicked.connect(self.reject)\n\n def setTitle(self, title):\n self.ui.title.setText(title)\n self.ui.title.adjustSize()\n self.ui.title.setGeometry(\n QRect(50, 50, self.ui.title.width(), self.ui.title.height()))\n return\n\n def setMessage(self, message):\n self.ui.message.setText(message)\n return\n\n # 重載方法\n\n def mousePressEvent(self, event):\n self.pressX = event.x()\n self.pressY = event.y()\n\n def mouseMoveEvent(self, event):\n x = event.x()\n y = event.y()\n moveX = x-self.pressX\n moveY = y-self.pressY\n positionX = self.frameGeometry().x() + moveX\n positionY = self.frameGeometry().y() + moveY\n self.move(positionX, positionY)\n\n# 主系統頁面\n\n\nclass ManagementSystem(QMainWindow):\n\n # variable\n interval = 1\n lock = 0\n\n useTimes = []\n onlineStuIDs = []\n onlineSeats = []\n offlineSeats = []\n runningThread = []\n\n def __init__(self):\n super(ManagementSystem, self).__init__()\n self.init()\n self.loading()\n\n def init(self):\n self.initUI()\n self.initInteraction()\n self.initUseTimes()\n\n # 初始化區\n def initUI(self):\n self.ui = Ui_ManagementSystem()\n self.ui.setupUi(self)\n self.setWindowTitle('淡江大學 實習上機系統')\n img = base64.b64decode(tku)\n self.setWindowIcon(QtGui.QIcon(self.svg2pixmap(img, QSize(512, 512))))\n\n def initInteraction(self):\n self.ui.close.clicked.connect(self.clickedClose)\n self.ui.minimal.clicked.connect(self.clickedMinimal)\n\n self.ui.autoOnlineStuID.focused.connect(self.cancelAutoOnlineStuID)\n self.ui.autoOnlineStuID.unfocused.connect(self.cancelAutoOnlineStuID)\n self.ui.autoOnlineStuID.keyDeletePressed.connect(\n self.cancelAutoOnlineStuID)\n self.ui.autoOnlineStuID.returnPressed.connect(\n self.commitAutoOnlineStuID)\n self.ui.autoOnlineStuID.keyRightPressed.connect(\n 
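# arrow-key handlers below move focus between neighbouring input fields\n            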
self.keyRightPressedAutoOnlineStuID)\n self.ui.autoOnlineStuID.keyLeftPressed.connect(\n self.keyLeftPressedAutoOnlineStuID)\n self.ui.autoOnlineStuIDCommit.clicked.connect(\n self.commitAutoOnlineStuID)\n\n self.ui.singleOnlineStuID.focused.connect(\n self.focusedSingleOnlineStuID)\n self.ui.singleOnlineStuID.returnPressed.connect(\n self.commitSingleOnlineStuID)\n self.ui.singleOnlineStuID.keyRightPressed.connect(\n self.keyRightPressedSingleOnlineStuID)\n self.ui.singleOnlineStuID.keyLeftPressed.connect(\n self.keyLeftPressedSingleOnlineStuID)\n self.ui.singleOnlineStuID.keyDownPressed.connect(\n self.keyDownPressedSingleOnlineStuID)\n self.ui.singleOnlineStuIDCommit.clicked.connect(\n self.commitSingleOnlineStuID)\n\n self.ui.singleOnlineSeat.focused.connect(self.focusedSingleOnlineSeat)\n self.ui.singleOnlineSeat.returnPressed.connect(\n self.commitSingleOnlineSeat)\n self.ui.singleOnlineSeat.keyLeftPressed.connect(\n self.keyLeftPressedSingleOnlineSeat)\n self.ui.singleOnlineSeat.keyRightPressed.connect(\n self.keyRightPressedSingleOnlineSeat)\n self.ui.singleOnlineSeat.keyDownPressed.connect(\n self.keyDownPressedSingleOnlineSeat)\n self.ui.singleOnlineSeatCommit.clicked.connect(\n self.commitSingleOnlineSeat)\n\n self.ui.singleOfflineSeat.focused.connect(\n self.focusedSingleOfflineSeat)\n self.ui.singleOfflineSeat.returnPressed.connect(\n self.commitSingleOfflineSeat)\n self.ui.singleOfflineSeat.keyLeftPressed.connect(\n self.keyLeftPressedSingleOfflineSeat)\n self.ui.singleOfflineSeat.keyRightPressed.connect(\n self.keyRightPressedSingleOfflineSeat)\n self.ui.singleOfflineSeat.keyDownPressed.connect(\n self.keyDownPressedSingleOfflineSeat)\n self.ui.singleOfflineSeatCommit.clicked.connect(\n self.commitSingleOfflineSeat)\n\n self.ui.manyOnlineStuID.focused.connect(self.cancelManyOnlineStuID)\n self.ui.manyOnlineStuID.keyDeletePressed.connect(\n self.cancelManyOnlineStuID)\n self.ui.manyOnlineStuID.returnPressed.connect(\n self.commitManyOnlineStuID)\n self.ui.manyOnlineStuID.keyLeftPressed.connect(\n self.keyLeftPressedManyOnlineStuID)\n self.ui.manyOnlineStuID.keyUpPressed.connect(\n self.keyUpPressedManyOnlineStuID)\n self.ui.manyOnlineStuID.keyRightPressed.connect(\n self.keyRightPressedManyOnlineStuID)\n self.ui.manyOnlineStuIDCommit.clicked.connect(\n self.commitManyOnlineStuID)\n\n self.ui.manyOnlineSeatStart.focused.connect(\n self.focusedManyOnlineSeatStart)\n self.ui.manyOnlineSeatEnd.focused.connect(\n self.focusedManyOnlineSeatEnd)\n self.ui.manyOnlineSeatStart.returnPressed.connect(\n self.returnPressedManyOnlineSeatStart)\n self.ui.manyOnlineSeatStart.keyUpPressed.connect(\n self.keyUpPressedManyOnlineSeatStart)\n self.ui.manyOnlineSeatStart.keyLeftPressed.connect(\n self.keyLeftPressedManyOnlineSeatStart)\n self.ui.manyOnlineSeatStart.keyRightPressed.connect(\n self.keyRightPressedManyOnlineSeatStart)\n self.ui.manyOnlineSeatEnd.returnPressed.connect(\n self.commitManyOnlineSeat)\n self.ui.manyOnlineSeatEnd.keyUpPressed.connect(\n self.keyUpPressedManyOnlineSeatEnd)\n self.ui.manyOnlineSeatEnd.keyLeftPressed.connect(\n self.keyLeftPressedManyOnlineSeatEnd)\n self.ui.manyOnlineSeatEnd.keyRightPressed.connect(\n self.keyRightPressedManyOnlineSeatEnd)\n self.ui.manyOnlineSeatCommit.clicked.connect(self.commitManyOnlineSeat)\n\n self.ui.manyOfflineSeatStart.focused.connect(\n self.focusedManyOfflineSeatStart)\n self.ui.manyOfflineSeatEnd.focused.connect(\n self.focusedManyOfflineSeatEnd)\n self.ui.manyOfflineSeatEnd.keyUpPressed.connect(\n 
self.keyUpPressedManyOfflineSeatEnd)\n self.ui.manyOfflineSeatEnd.keyLeftPressed.connect(\n self.keyLeftPressedManyOfflineSeatEnd)\n self.ui.manyOfflineSeatEnd.keyRightPressed.connect(\n self.keyRightPressedManyOfflineSeatEnd)\n self.ui.manyOfflineSeatStart.returnPressed.connect(\n self.returnPressedManyOfflineSeatStart)\n self.ui.manyOfflineSeatStart.keyUpPressed.connect(\n self.keyUpPressedManyOfflineSeatStart)\n self.ui.manyOfflineSeatStart.keyLeftPressed.connect(\n self.keyLeftPressedManyOfflineSeatStart)\n self.ui.manyOfflineSeatStart.keyRightPressed.connect(\n self.keyRightPressedManyOfflineSeatStart)\n self.ui.manyOfflineSeatEnd.returnPressed.connect(\n self.commitManyOfflineSeat)\n self.ui.manyOfflineSeatCommit.clicked.connect(\n self.commitManyOfflineSeat)\n\n self.ui.logout.clicked.connect(self.clickedLogout)\n self.ui.seatTime.clicked.connect(self.clickedSeatTime)\n self.ui.classSeat.clicked.connect(self.clickedClassSeat)\n self.ui.clear.clicked.connect(self.clickedClear)\n self.ui.clean.clicked.connect(self.clickedClean)\n\n for i in range(0, self.ui.seatTableLayout.rowCount()):\n for j in range(0, self.ui.seatTableLayout.columnCount()):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.text() != '':\n seat.dropped.connect(self.exchangeSeat)\n\n def initUseTimes(self):\n for i in range(0, self.ui.seatTableLayout.rowCount()):\n for j in range(0, self.ui.seatTableLayout.columnCount()):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.text() != '':\n self.useTimes.append(None)\n\n # 載入資料區\n def loading(self):\n self.loadingSetting()\n self.loadingInformation()\n self.loadingSeatStatus()\n self.loadingUseTime()\n\n def loadingInformation(self):\n self.loadingDateTime()\n self.loadingWeather()\n self.loadingRoom()\n self.loadingMood()\n\n def loadingDateTime(self):\n self.TimerDatetime = QTimer()\n self.TimerDatetime.timeout.connect(self.updateDateTime)\n self.TimerDatetime.start(1000)\n\n def loadingWeather(self):\n\n self.TimerWeather = QTimer()\n self.TimerWeather.timeout.connect(self.updateWeather)\n self.TimerWeather.singleShot(0, self.updateWeather)\n self.TimerWeather.start(3600000)\n\n def loadingRoom(self):\n self.threadRoomName = ThreadGetRoomName()\n self.threadRoomName.res.connect(self.updateRoomName)\n self.threadRoomName.start()\n\n def loadingMood(self):\n self.TimerMood = QTimer()\n self.TimerMood.timeout.connect(self.updateMood)\n self.TimerMood.singleShot(0, self.updateMood)\n self.TimerMood.start(3600)\n\n def loadingSeatStatus(self):\n self.threadGetSeatStatus = ThreadGetSeatStatus()\n self.threadGetSeatStatus.res.connect(self.updateSeatStatus)\n self.threadGetSeatStatus.start()\n return\n\n def loadingUseTime(self):\n self.TimerUseTime = QTimer()\n self.TimerUseTime.timeout.connect(self.updateUseTime)\n self.TimerUseTime.start(1000)\n\n def loadingSetting(self):\n return\n\n # 導航控制\n def clickedClose(self):\n app = QApplication.instance()\n app.quit()\n\n def clickedMinimal(self):\n self.showMinimized()\n\n # 畫面更新\n def updateDateTime(self):\n now = datetime.datetime.now()\n self.ui.date.setText(now.strftime('%Y/%m/%d %A'))\n self.ui.time.setText(now.strftime('%H:%M:%S'))\n\n def updateWeather(self):\n self.threadUpdateWeather = ThreadGetWeather()\n self.threadUpdateWeather.res.connect(self.updateWeatherShow)\n self.threadUpdateWeather.start()\n\n def updateWeatherShow(self, res):\n self.ui.weather.setText(res)\n\n def updateRoomName(self, res):\n 
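# slot for ThreadGetRoomName: display the classroom name fetched from the server DB\n        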
self.ui.location.setText(res+'實習室')\n\n def updateMood(self):\n now = datetime.datetime.now()\n hour = now.hour\n if hour == 8:\n self.ui.mood.setText('早八上班辛苦了,喝杯咖啡吧')\n elif hour == 9:\n self.ui.mood.setText('')\n elif hour == 10:\n self.ui.mood.setText('')\n elif hour == 11:\n self.ui.mood.setText('終於過了上班時間的一半了!')\n elif hour == 12:\n self.ui.mood.setText('')\n elif hour == 13:\n self.ui.mood.setText('快要下班了呦!再堅持一下!')\n elif hour == 14:\n self.ui.mood.setText('嗨,上班快樂')\n elif hour == 15:\n self.ui.mood.setText('')\n elif hour == 16:\n self.ui.mood.setText('')\n elif hour == 17:\n self.ui.mood.setText('')\n elif hour == 18:\n self.ui.mood.setText('')\n elif hour == 19:\n self.ui.mood.setText('')\n elif hour == 20:\n self.ui.mood.setText('')\n elif hour == 21:\n self.ui.mood.setText('')\n elif hour == 22:\n self.ui.mood.setText('大夜班很漫長,對吧?')\n elif hour == 23:\n self.ui.mood.setText('')\n elif hour == 24:\n self.ui.mood.setText('來數數現在天上有幾顆星星吧')\n elif hour == 1:\n self.ui.mood.setText('')\n elif hour == 2:\n self.ui.mood.setText('')\n elif hour == 3:\n self.ui.mood.setText('')\n elif hour == 4:\n self.ui.mood.setText('')\n elif hour == 5:\n self.ui.mood.setText('')\n elif hour == 6:\n self.ui.mood.setText('')\n elif hour == 7:\n self.ui.mood.setText('')\n\n def updateSeatStatus(self, res):\n\n date = datetime.datetime.now().strftime('%Y%m%d ')\n\n row = self.ui.seatTableLayout.rowCount()\n col = self.ui.seatTableLayout.columnCount()\n\n for seatStatus in res:\n flag = False\n for i in range(0, row):\n for j in range(0, col):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.text() != '':\n seatNum = int(seat.text())\n if seatNum == seatStatus[1]:\n seat.setToolTip(seatStatus[2].strip())\n flag = True\n if flag == True:\n break\n if flag == True:\n break\n self.useTimes[seatStatus[1]-1] = datetime.datetime.strptime(\n date+seatStatus[3], '%Y%m%d %H:%M:%S')\n\n if self.isClassSeat():\n self.lock = 2\n elif self.isCleanTime():\n self.lock = 1\n\n def updateUseTime(self):\n now = datetime.datetime.now()\n\n if self.lock == 0:\n for i in range(0, self.ui.seatTableLayout.rowCount()):\n for j in range(0, self.ui.seatTableLayout.columnCount()):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n seatNum = seat.text()\n if seatNum != '':\n seatNum = int(seatNum)\n if self.useTimes[seatNum-1] == None:\n seat.setStyleSheet('QPushButton{border-style:none; background-color: white; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:hover{border-style:none; background-color: #EEEEEE; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:pressed{border-style:none; background-color: #999999; border-radius: 5px; border: 1px solid black; padding: 5px;}')\n else:\n diff = (\n now-self.useTimes[seatNum-1]).total_seconds()\n if diff >= 7200:\n seat.setStyleSheet('QPushButton{border-style:none; background-color: red; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:hover{border-style:none; background-color: #EEEEEE; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:pressed{border-style:none; background-color: #999999; border-radius: 5px; border: 1px solid black; padding: 5px;}')\n elif diff >= 3600:\n seat.setStyleSheet('QPushButton{border-style:none; background-color: orange; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:hover{border-style:none; background-color: #EEEEEE; border-radius: 5px; border: 1px 
solid black; padding: 5px;} QPushButton:pressed{border-style:none; background-color: #999999; border-radius: 5px; border: 1px solid black; padding: 5px;}')\n else:\n seat.setStyleSheet('QPushButton{border-style:none; background-color: green; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:hover{border-style:none; background-color: #EEEEEE; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:pressed{border-style:none; background-color: #999999; border-radius: 5px; border: 1px solid black; padding: 5px;}')\n\n # 輸入區\n def commitAutoOnlineStuID(self):\n stuID = self.ui.autoOnlineStuID.text()\n\n if stuID == '' and len(self.onlineStuIDs) == 0:\n self.ui.singleOfflineSeat.setFocus(True)\n elif stuID == '':\n self.online()\n else:\n stuID = stuID.zfill(9)\n if stuID not in self.onlineStuIDs:\n self.onlineStuIDs.append(stuID)\n self.ui.autoOnlineStuID.setText('')\n self.ui.autoOnlineStatus.setText(f'您已輸入{len(self.onlineStuIDs)}人')\n\n def commitManyOnlineStuID(self):\n stuID = self.ui.manyOnlineStuID.text()\n\n if stuID == '' and len(self.onlineStuIDs) == 0:\n self.ui.manyOfflineSeatStart.setFocus(True)\n elif stuID == '':\n self.ui.manyOnlineSeatStart.setFocus(True)\n else:\n stuID = stuID.zfill(9)\n if stuID not in self.onlineStuIDs:\n self.onlineStuIDs.append(stuID)\n self.ui.manyOnlineStuID.setText('')\n self.ui.manyOnlineStuIDStatus.setText(\n f'您已輸入{len(self.onlineStuIDs)}人')\n\n def commitSingleOnlineStuID(self):\n stuID = self.ui.singleOnlineStuID.text()\n\n if stuID == '':\n self.ui.singleOfflineSeat.setFocus(True)\n else:\n stuID = stuID.zfill(9)\n self.ui.singleOnlineStuID.setText(stuID)\n self.onlineStuIDs.append(stuID)\n self.ui.singleOnlineSeat.setFocus(True)\n\n def commitSingleOnlineSeat(self):\n seatNum = self.ui.singleOnlineSeat.text()\n\n if seatNum == '':\n self.ui.singleOfflineSeat.setFocus(True)\n else:\n self.onlineSeats.append(seatNum.zfill(2))\n\n if len(self.onlineSeats) == len(self.onlineStuIDs):\n self.online()\n self.ui.singleOnlineStuID.setText('')\n self.ui.singleOnlineSeat.setText('')\n self.ui.singleOnlineStuID.setFocus(True)\n\n def commitSingleOfflineSeat(self):\n seatNum = self.ui.singleOfflineSeat.text()\n\n if seatNum == '':\n self.ui.autoOnlineStuID.setFocus(True)\n else:\n self.ui.singleOfflineSeat.setText('')\n seatNum = seatNum.zfill(2)\n self.offlineSeats.append(seatNum)\n self.offline()\n\n def commitManyOfflineSeat(self):\n if self.ui.manyOfflineSeatStart.text() != '' and self.ui.manyOfflineSeatEnd.text() != '':\n\n seatNumStart = int(self.ui.manyOfflineSeatStart.text())\n seatNumEnd = int(self.ui.manyOfflineSeatEnd.text())\n\n if self.isSeatNumValid(seatNumStart) and self.isSeatNumValid(seatNumEnd) and seatNumStart <= seatNumEnd:\n for i in range(seatNumStart, seatNumEnd+1):\n self.offlineSeats.append(str(i).zfill(2))\n\n self.offline()\n else:\n print('下機失敗')\n\n self.ui.manyOfflineSeatStart.setText('')\n self.ui.manyOfflineSeatEnd.setText('')\n self.ui.manyOfflineSeatStart.setFocus(True)\n\n def commitManyOnlineSeat(self):\n if self.ui.manyOnlineSeatStart.text() != '' and self.ui.manyOnlineSeatEnd.text() != '':\n\n seatNumStart = int(self.ui.manyOnlineSeatStart.text())\n seatNumEnd = int(self.ui.manyOnlineSeatEnd.text())\n\n if self.isSeatNumValid(seatNumStart) and self.isSeatNumValid(seatNumEnd) and seatNumStart <= seatNumEnd:\n for i in range(seatNumStart, seatNumEnd+1):\n self.onlineSeats.append(str(i).zfill(2))\n\n if len(self.onlineStuIDs) == len(self.onlineSeats):\n self.online()\n else:\n dialog = 
MessageDialog()\n dialog.setTitle('錯誤訊息')\n dialog.setMessage('請確認輸入人數與座位數量一致')\n dialog.exec()\n else:\n dialog = MessageDialog()\n dialog.setTitle('錯誤訊息')\n dialog.setMessage('請確認輸入的座位區間合法且正確')\n dialog.exec()\n\n self.ui.manyOnlineSeatStart.setText('')\n self.ui.manyOnlineSeatEnd.setText('')\n self.ui.manyOnlineStuID.setFocus(True)\n\n # 布林判斷系列\n def isRepeat(self, stuID):\n row = self.ui.seatTableLayout.rowCount()\n col = self.ui.seatTableLayout.columnCount()\n\n for i in range(0, row):\n for j in range(0, col):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.toolTip() == stuID:\n return True\n\n return False\n\n def isLeftBorder(self, i, j):\n if j == 0:\n return True\n elif self.ui.seatTableLayout.itemAtPosition(i, j-1) == None:\n return True\n elif self.ui.seatTableLayout.itemAtPosition(i, j-1).widget().text() == '':\n return True\n\n return False\n\n def isRightBorder(self, i, j):\n if j == self.ui.seatTableLayout.columnCount():\n return True\n elif self.ui.seatTableLayout.itemAtPosition(i, j+1) == None:\n return True\n elif self.ui.seatTableLayout.itemAtPosition(i, j+1).widget().text() == '':\n return True\n\n return False\n\n def isSeatNumValid(self, seatNum):\n seatMax = len(self.useTimes)\n seatNum = int(seatNum)\n if seatNum >= 1 and seatNum <= seatMax:\n return True\n return False\n\n def isAllSeatEmpty(self):\n for seat in self.useTimes:\n if seat != None:\n return False\n return True\n\n def isCleanTime(self):\n for i in range(0, self.ui.seatTableLayout.rowCount()):\n for j in range(0, self.ui.seatTableLayout.columnCount()):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.text() != '' and seat.toolTip() != '維修時間':\n return False\n return True\n\n def isClassSeat(self):\n for i in range(0, self.ui.seatTableLayout.rowCount()):\n for j in range(0, self.ui.seatTableLayout.columnCount()):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.text() != '' and seat.toolTip() != '班級上機':\n return False\n return True\n\n # 排位演算法\n def autoArrangeSeats(self):\n\n row = self.ui.seatTableLayout.rowCount()\n col = self.ui.seatTableLayout.columnCount()\n\n resArrange = []\n resRepeat = self.checkRepeat()\n\n for i in range(0, row):\n\n for j in range(0, col):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.toolTip() == '空位':\n if not self.isLeftBorder(i, j):\n flag = False\n for l in range(0, self.interval):\n seat = self.ui.seatTableLayout.itemAtPosition(\n i, j+l)\n if seat == None:\n flag = True\n break\n seat = seat.widget()\n if seat.toolTip() != '空位':\n flag = True\n break\n if flag == True:\n continue\n else:\n j += self.interval\n\n if self.isLeftBorder(i, j):\n if j+len(self.onlineStuIDs) < col:\n if not self.isRightBorder(i, j+len(self.onlineStuIDs)):\n flag = False\n for l in range(0, self.interval):\n seat = self.ui.seatTableLayout.itemAtPosition(\n i, j+len(self.onlineStuIDs)+l)\n if seat == None:\n flag = True\n break\n seat = seat.widget()\n if seat.toolTip() != '空位':\n flag = True\n break\n if flag == True:\n continue\n else:\n break\n else:\n if j+len(self.onlineStuIDs)-1 < col:\n\n if not self.isRightBorder(i, j+len(self.onlineStuIDs)-1):\n flag = False\n for l in range(0, self.interval):\n seat = self.ui.seatTableLayout.itemAtPosition(\n i, j+len(self.onlineStuIDs)+l)\n if seat == None:\n flag = True\n break\n seat = seat.widget()\n if 
seat.toolTip() != '空位':\n flag = True\n break\n\n if flag == True:\n continue\n else:\n break\n\n for l in range(0, len(self.onlineStuIDs)):\n\n seat = self.ui.seatTableLayout.itemAtPosition(\n i, j+l)\n if seat == None:\n resArrange.clear()\n break\n seat = seat.widget()\n if seat.toolTip() != '空位':\n resArrange.clear()\n break\n else:\n resArrange.append(\n ['N', self.onlineStuIDs[l], seat.text(), i, j+l])\n\n if len(resArrange) == len(self.onlineStuIDs):\n break\n if len(resArrange) == len(self.onlineStuIDs):\n break\n\n if len(resArrange) == 0:\n for stuID in self.onlineStuIDs:\n resArrange.append(['F', stuID, '', -1, -1])\n\n self.onlineStuIDs.clear()\n self.ui.autoOnlineStatus.setText(f'您已輸入{len(self.onlineStuIDs)}人')\n\n return resRepeat+resArrange\n\n def manualArrangeSeats(self):\n resArrange = []\n resRepeat = self.checkRepeat()\n\n for i in range(0, len(self.onlineStuIDs)):\n position = self.getPositionBySeatNum(self.onlineSeats[i])\n seat = self.ui.seatTableLayout.itemAtPosition(\n position[0], position[1]).widget()\n if seat.toolTip() == '空位':\n resArrange.append(\n ['N', self.onlineStuIDs[i], seat.text(), position[0], position[1]])\n else:\n resArrange.append(\n ['P', self.onlineStuIDs[i], seat.text(), position[0], position[1]])\n\n self.onlineStuIDs.clear()\n self.onlineSeats.clear()\n\n return resRepeat+resArrange\n # 上機下機\n\n def online(self):\n if self.lock == 0:\n if len(self.onlineSeats) == 0:\n arrangements = self.autoArrangeSeats()\n else:\n arrangements = self.manualArrangeSeats()\n\n print(arrangements)\n\n for arrangement in arrangements:\n if arrangement[0] == 'R' or arrangement[0] == 'P' or arrangement[0] == 'F':\n continue\n seat = self.ui.seatTableLayout.itemAtPosition(\n arrangement[3], arrangement[4]).widget()\n seat.setToolTip(arrangement[1])\n seat.setStyleSheet('QPushButton{border-style:none; background-color: green; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:hover{border-style:none; background-color: #EEEEEE; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:pressed{border-style:none; background-color: #999999; border-radius: 5px; border: 1px solid black; padding: 5px;}')\n self.useTimes[int(arrangement[2])-1] = datetime.datetime.now()\n\n self.runningThread.append(ThreadOnlineComputer(\n arrangement[1], int(arrangement[2])))\n self.runningThread[-1].finished.connect(\n self.threadFinishedOnlineComputer)\n self.runningThread[-1].start()\n\n res = numpy.delete(arrangements, [3, 4], axis=1)\n self.showOnlineResult(res)\n elif self.lock == 1:\n dialog = MessageDialog()\n dialog.setTitle('錯誤訊息')\n dialog.setMessage('請先解除維修時間')\n self.onlineSeats.clear()\n self.onlineStuIDs.clear()\n dialog.exec()\n\n elif self.lock == 2:\n dialog = MessageDialog()\n dialog.setTitle('錯誤訊息')\n dialog.setMessage('請先解除班級上機')\n self.onlineSeats.clear()\n self.onlineStuIDs.clear()\n dialog.exec()\n\n def offline(self):\n if self.lock == 0:\n for seatNum in self.offlineSeats:\n if self.isSeatNumValid(seatNum):\n for i in range(0, self.ui.seatTableLayout.rowCount()):\n for j in range(0, self.ui.seatTableLayout.columnCount()):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.text() == seatNum:\n\n self.runningThread.append(\n ThreadOfflineComputer(seat.toolTip(), int(seatNum)))\n self.runningThread[-1].finished.connect(\n self.threadFinishedOfflineComputer)\n self.runningThread[-1].start()\n\n seat.setToolTip('空位')\n seat.setStyleSheet('QPushButton{border-style:none; 
background-color: white; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:hover{border-style:none; background-color: #EEEEEE; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:pressed{border-style:none; background-color: #999999; border-radius: 5px; border: 1px solid black; padding: 5px;}')\n self.useTimes[int(seatNum)-1] = None\n self.offlineSeats.clear()\n elif self.lock == 1:\n dialog = MessageDialog()\n dialog.setTitle('錯誤訊息')\n dialog.setMessage('請先解除維修時間')\n self.offlineSeats.clear()\n dialog.exec()\n elif self.lock == 2:\n dialog = MessageDialog()\n dialog.setTitle('錯誤訊息')\n dialog.setMessage('請先解除班級上機')\n self.offlineSeats.clear()\n dialog.exec()\n\n def showOnlineResult(self, res):\n output = ''\n\n for item in res:\n if item[0] == 'N':\n output += item[1]+\":排位結果座號為\"+item[2]+\"號\"\n elif item[0] == 'R':\n output += item[1]+\":重複排位,該名學生目前上機座號為\"+item[2]+\"號\"\n elif item[0] == 'P':\n output += item[1]+\":指定位置\"+item[2]+\"號目前非空位,請查核\"\n elif item[0] == 'F':\n output += item[1]+\":排位失敗,目前並無符合設定之空位\"\n output += '\\n'\n dialog = MessageDialog()\n dialog.setTitle('座位分配結果')\n dialog.setMessage('下列為本次上機分發結果:\\n\\n'+output)\n dialog.exec()\n\n def classOnline(self):\n return\n\n def classOffline(self):\n return\n\n def cleanOnline(self):\n self.runningThread.append(\n ThreadCleanOnlineComputer(len(self.useTimes)))\n self.runningThread[-1].finished.connect(\n self.threadFinishedCleanOnlineComputer)\n self.runningThread[-1].start()\n\n for i in range(0, self.ui.seatTableLayout.rowCount()):\n for j in range(0, self.ui.seatTableLayout.columnCount()):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.text() != '':\n seat.setToolTip('維修時間')\n seat.setStyleSheet('QPushButton{border-style:none; background-color: red; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:hover{border-style:none; background-color: #EEEEEE; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:pressed{border-style:none; background-color: #999999; border-radius: 5px; border: 1px solid black; padding: 5px;}')\n self.useTimes[int(seat.text()) -\n 1] = datetime.datetime.now()\n\n def cleanOffline(self):\n\n # 這裡要補維修時間下機程式碼\n\n for i in range(0, self.ui.seatTableLayout.rowCount()):\n for j in range(0, self.ui.seatTableLayout.columnCount()):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.text() != '':\n seat.setToolTip('空位')\n seat.setStyleSheet('QPushButton{border-style:none; background-color: white; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:hover{border-style:none; background-color: #EEEEEE; border-radius: 5px; border: 1px solid black; padding: 5px;} QPushButton:pressed{border-style:none; background-color: #999999; border-radius: 5px; border: 1px solid black; padding: 5px;}')\n self.useTimes[int(seat.text()) -\n 1] = None\n\n return\n\n def allOffline(self):\n self.offlineSeats.clear()\n for i in range(0, self.ui.seatTableLayout.rowCount()):\n for j in range(0, self.ui.seatTableLayout.columnCount()):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.toolTip() != '空位' and seat.text() != '':\n self.offlineSeats.append(seat.text())\n self.offline()\n\n def exchangeSeat(self, fromSeatNum):\n toSeatNum = self.sender().text()\n\n if toSeatNum != fromSeatNum:\n fromSeatPosition = self.getPositionBySeatNum(fromSeatNum)\n toSeatPosition = 
self.getPositionBySeatNum(toSeatNum)\n\n fromSeat = self.ui.seatTableLayout.itemAtPosition(\n fromSeatPosition[0], fromSeatPosition[1]).widget()\n toSeat = self.ui.seatTableLayout.itemAtPosition(\n toSeatPosition[0], toSeatPosition[1]).widget()\n\n self.offlineSeats.clear()\n self.onlineSeats.clear()\n self.onlineStuIDs.clear()\n\n if fromSeat.toolTip() == '空位' and toSeat.toolTip() == '空位':\n return\n elif fromSeat.toolTip() == '空位' and toSeat.toolTip() != '空位':\n self.offlineSeats.append(toSeatNum)\n self.onlineSeats.append(fromSeatNum)\n self.onlineStuIDs.append(toSeat.toolTip())\n self.offline()\n self.online()\n elif fromSeat.toolTip() != '空位' and toSeat.toolTip() == '空位':\n self.offlineSeats.append(fromSeatNum)\n self.onlineSeats.append(toSeatNum)\n self.onlineStuIDs.append(fromSeat.toolTip())\n self.offline()\n self.online()\n elif fromSeat.toolTip() != '空位' and toSeat.toolTip() != '空位':\n\n stuID1 = fromSeat.toolTip()\n stuID2 = toSeat.toolTip()\n\n self.offlineSeats.append(fromSeatNum)\n self.offlineSeats.append(toSeatNum)\n self.offline()\n\n self.onlineStuIDs.append(stuID1)\n self.onlineSeats.append(toSeatNum)\n self.onlineStuIDs.append(stuID2)\n self.onlineSeats.append(fromSeatNum)\n self.online()\n\n # 網路線程結束區\n\n def threadFinishedOnlineComputer(self):\n sender = self.sender()\n self.runningThread.remove(sender)\n\n def threadFinishedOfflineComputer(self):\n sender = self.sender()\n self.runningThread.remove(sender)\n\n def threadFinishedCleanOnlineComputer(self):\n sender = self.sender()\n self.runningThread.remove(sender)\n\n # UI用戶交互介面區 - 特定按鍵點擊\n def keyRightPressedAutoOnlineStuID(self):\n self.ui.singleOnlineStuID.setFocus(True)\n\n def keyLeftPressedAutoOnlineStuID(self):\n self.ui.singleOfflineSeat.setFocus(True)\n\n def keyRightPressedSingleOnlineStuID(self):\n self.ui.singleOnlineSeat.setFocus(True)\n\n def keyLeftPressedSingleOnlineStuID(self):\n self.ui.autoOnlineStuID.setFocus(True)\n\n def keyDownPressedSingleOnlineStuID(self):\n self.ui.manyOnlineStuID.setFocus(True)\n\n def keyLeftPressedSingleOnlineSeat(self):\n self.ui.singleOnlineStuID.setFocus(True)\n\n def keyRightPressedSingleOnlineSeat(self):\n self.ui.singleOfflineSeat.setFocus(True)\n\n def keyLeftPressedSingleOfflineSeat(self):\n self.ui.singleOnlineSeat.setFocus(True)\n\n def keyRightPressedSingleOfflineSeat(self):\n self.ui.autoOnlineStuID.setFocus(True)\n\n def keyDownPressedSingleOfflineSeat(self):\n self.ui.manyOfflineSeatStart.setFocus(True)\n\n def keyDownPressedSingleOnlineSeat(self):\n self.ui.manyOnlineSeatStart.setFocus(True)\n\n def keyLeftPressedManyOnlineStuID(self):\n self.ui.manyOfflineSeatEnd.setFocus(True)\n\n def keyUpPressedManyOnlineStuID(self):\n self.ui.singleOnlineStuID.setFocus(True)\n\n def keyRightPressedManyOnlineStuID(self):\n self.ui.manyOnlineSeatStart.setFocus(True)\n\n def keyUpPressedManyOnlineSeatStart(self):\n self.ui.singleOnlineSeat.setFocus(True)\n\n def keyRightPressedManyOnlineSeatStart(self):\n self.ui.manyOnlineSeatEnd.setFocus(True)\n\n def keyLeftPressedManyOnlineSeatStart(self):\n self.ui.manyOnlineStuID.setFocus(True)\n\n def keyRightPressedManyOnlineSeatEnd(self):\n self.ui.manyOfflineSeatStart.setFocus(True)\n\n def keyLeftPressedManyOnlineSeatEnd(self):\n self.ui.manyOnlineSeatStart.setFocus(True)\n\n def keyUpPressedManyOnlineSeatEnd(self):\n self.ui.singleOnlineSeat.setFocus(True)\n\n def keyUpPressedManyOfflineSeatEnd(self):\n self.ui.singleOfflineSeat.setFocus(True)\n\n def keyLeftPressedManyOfflineSeatEnd(self):\n 
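# Left arrow in the offline range-end field jumps back to the range-start field\n        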
self.ui.manyOfflineSeatStart.setFocus(True)\n\n    def keyRightPressedManyOfflineSeatEnd(self):\n        self.ui.manyOnlineStuID.setFocus(True)\n\n    def keyUpPressedManyOfflineSeatStart(self):\n        self.ui.singleOfflineSeat.setFocus(True)\n\n    def keyRightPressedManyOfflineSeatStart(self):\n        self.ui.manyOfflineSeatEnd.setFocus(True)\n\n    def keyLeftPressedManyOfflineSeatStart(self):\n        self.ui.manyOnlineSeatEnd.setFocus(True)\n\n    # UI用戶交互介面區 - cancel\n    def cancelAutoOnlineStuID(self):\n        self.onlineStuIDs.clear()\n        self.ui.autoOnlineStatus.setText(f'您已輸入{len(self.onlineStuIDs)}人')\n\n    def cancelManyOnlineStuID(self):\n        self.onlineStuIDs.clear()\n        self.ui.manyOnlineStuIDStatus.setText(f'您已輸入{len(self.onlineStuIDs)}人')\n\n    # UI用戶交互介面區 - focused\n\n    def focusedSingleOnlineStuID(self):\n        self.onlineStuIDs.clear()\n        self.onlineSeats.clear()\n\n    def focusedSingleOnlineSeat(self):\n        self.onlineSeats.clear()\n\n    def focusedSingleOfflineSeat(self):\n        self.offlineSeats.clear()\n\n    def focusedManyOnlineSeatStart(self):\n        self.onlineSeats.clear()\n\n    def focusedManyOnlineSeatEnd(self):\n        self.onlineSeats.clear()\n\n    def focusedManyOfflineSeatStart(self):\n        self.offlineSeats.clear()\n\n    def focusedManyOfflineSeatEnd(self):\n        self.offlineSeats.clear()\n\n    # UI用戶交互介面區 - clicked\n\n    def clickedLogout(self):\n        dialog = MessageDialog()\n        dialog.setMessage('請確認是否登出')\n        dialog.setTitle('提示訊息')\n        if dialog.exec():\n            global window\n            window.hide()\n            window = Login()\n            window.show()\n\n    def clickedSeatTime(self):\n        data = []\n        for i in range(0, self.ui.seatTableLayout.rowCount()):\n            for j in range(0, self.ui.seatTableLayout.columnCount()):\n                seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n                if seat != None:\n                    seat = seat.widget()\n                    if seat.text() != '' and seat.toolTip() != '空位':\n                        time = datetime.datetime.now(\n                        )-self.useTimes[int(seat.text())-1]\n                        strTime = f'{time.days:02d}天{time.seconds//3600:02d}時{(time.seconds//60)%60:02d}分'\n                        data.append((seat.toolTip(), seat.text(), strTime))\n\n        dialog = UseTimeDialog(data)\n        dialog.exec()\n\n    def clickedClassSeat(self):\n        if self.lock == 0:\n            dialog = MessageDialog()\n            dialog.setTitle('提示訊息')\n            dialog.setMessage('請問確認是否班級上機')\n            if dialog.exec():\n                if self.isAllSeatEmpty():\n                    self.classOnline()\n                    self.lock = 2\n                    self.ui.classSeat.setToolTip('班級下機')\n                else:\n                    dialog = MessageDialog()\n                    dialog.setTitle('錯誤訊息')\n                    dialog.setMessage('請確認所有座位為空')\n                    dialog.exec()\n        elif self.lock == 2:\n            dialog = MessageDialog()\n            dialog.setTitle('提示訊息')\n            dialog.setMessage('請問是否班級下機')\n            if dialog.exec():\n                self.classOffline()\n                self.lock = 0\n                self.ui.classSeat.setToolTip('班級上機')\n        elif self.lock == 1:\n            dialog = MessageDialog()\n            dialog.setTitle('錯誤訊息')\n            dialog.setMessage('請先關閉維修時間')\n            dialog.exec()\n\n    def clickedClear(self):\n        dialog = MessageDialog()\n        dialog.setTitle('提示訊息')\n        dialog.setMessage('請問是否確認清空所有上機狀況')\n        if dialog.exec():\n            self.allOffline()\n\n    def clickedClean(self):\n        if self.lock == 0:\n            dialog = MessageDialog()\n            dialog.setTitle('提示訊息')\n            dialog.setMessage('請問是否進入維修時間')\n            if dialog.exec():\n                if self.isAllSeatEmpty():\n                    res = self.cleanOnline()\n                    self.lock = 1\n                    self.ui.clean.setToolTip('關閉維修')\n                else:\n                    dialog = MessageDialog()\n                    dialog.setTitle('錯誤訊息')\n                    dialog.setMessage('請確認所有位置為空')\n                    dialog.exec()\n        elif self.lock == 1:\n            dialog = MessageDialog()\n            dialog.setTitle('提示訊息')\n            dialog.setMessage('請問是否關閉維修時間')\n            if dialog.exec():\n                self.cleanOffline()\n                self.lock = 0\n                self.ui.clean.setToolTip('維修時間')\n        elif self.lock == 2:\n            dialog = MessageDialog()\n            dialog.setTitle('錯誤訊息')\n            
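# lock == 2 means a class session is active, so maintenance mode cannot start\n            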
dialog.setMessage('請先解除班級上機')\n dialog.exec()\n # UI用戶交互介面區 - returnPressed\n\n def returnPressedManyOnlineSeatStart(self):\n self.ui.manyOnlineSeatEnd.setFocus(True)\n\n def returnPressedManyOfflineSeatStart(self):\n if self.ui.manyOfflineSeatStart.text() == '':\n self.ui.manyOnlineStuID.setFocus(True)\n else:\n self.ui.manyOfflineSeatEnd.setFocus(True)\n\n # 其他\n\n def getRepeatSeat(self, stuID):\n row = self.ui.seatTableLayout.rowCount()\n col = self.ui.seatTableLayout.columnCount()\n\n for i in range(0, row):\n for j in range(0, col):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.toolTip() == stuID:\n return [i, j]\n\n return [-1, -1]\n\n def getPositionBySeatNum(self, num):\n for i in range(0, self.ui.seatTableLayout.rowCount()):\n for j in range(0, self.ui.seatTableLayout.columnCount()):\n seat = self.ui.seatTableLayout.itemAtPosition(i, j)\n if seat != None:\n seat = seat.widget()\n if seat.text() == num:\n return [i, j]\n\n def checkRepeat(self):\n res = []\n\n for stuID in self.onlineStuIDs:\n if self.isRepeat(stuID):\n repeatSeat = self.getRepeatSeat(stuID)\n repeatSeatNum = self.ui.seatTableLayout.itemAtPosition(\n repeatSeat[0], repeatSeat[1]).widget().text()\n res.append(['R', stuID, repeatSeatNum]+repeatSeat)\n\n for repeatStuID in res:\n if repeatStuID[1] in self.onlineStuIDs:\n self.onlineStuIDs.remove(repeatStuID[1])\n\n return res\n\n # 重載方法\n\n def mousePressEvent(self, event):\n self.pressX = event.x()\n self.pressY = event.y()\n\n def mouseMoveEvent(self, event):\n x = event.x()\n y = event.y()\n moveX = x-self.pressX\n moveY = y-self.pressY\n positionX = self.frameGeometry().x() + moveX\n positionY = self.frameGeometry().y() + moveY\n self.move(positionX, positionY)\n\n def svg2pixmap(self, img, size):\n render = QtSvg.QSvgRenderer(img)\n image = QtGui.QImage(size.width(), size.height(),\n QtGui.QImage.Format_ARGB32)\n painter = QtGui.QPainter(image)\n painter.setCompositionMode(QtGui.QPainter.CompositionMode_SourceOver)\n image.fill(Qt.Qt.transparent)\n render.render(painter)\n img = QtGui.QPixmap.fromImage(image)\n del painter\n return img\n\n# 登入頁面\n\n\nclass Login(QMainWindow):\n\n def __init__(self):\n super(Login, self).__init__()\n self.initUI()\n self.initInteraction()\n\n def initUI(self):\n self.ui = Ui_Login()\n self.ui.setupUi(self)\n self.setWindowTitle('淡江大學 實習上機系統')\n img = base64.b64decode(tku)\n self.setWindowIcon(QtGui.QIcon(self.svg2pixmap(img, QSize(512, 512))))\n\n def initInteraction(self):\n self.ui.close.clicked.connect(self.clickedClose)\n self.ui.login.clicked.connect(self.clickedLogin)\n\n def clickedClose(self):\n app = QApplication.instance()\n app.quit()\n\n def clickedLogin(self):\n\n self.ui.loading.setVisible(True)\n\n stuID = self.ui.username.text()\n stuPWD = self.ui.password.text()\n\n if stuID == '' and stuPWD == '':\n self.statusSet(5)\n elif stuID == '':\n self.statusSet(6)\n elif stuPWD == '':\n self.statusSet(7)\n else:\n self.threadLogin = ThreadLogin(stuID, stuPWD)\n self.threadLogin.res.connect(self.statusSet)\n self.threadLogin.start()\n\n def statusSet(self, code):\n if code == 0:\n self.ui.status.setText('登入成功,請等候跳轉')\n global window\n window.hide()\n window = ManagementSystem()\n window.show()\n elif code == 1:\n self.ui.status.setText('資料未成功傳遞至伺服器,請重試')\n elif code == 2:\n self.ui.status.setText('帳號或密碼錯誤,請重試')\n elif code == 3:\n self.ui.status.setText('帳號或密碼錯誤,請重試')\n elif code == 4:\n self.ui.status.setText('發生未知錯誤,請聯絡開發人員')\n elif code == 5:\n 
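# code 5: both the account and the password fields were left empty\n            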
self.ui.status.setText('請輸入帳號及密碼')\n elif code == 6:\n self.ui.status.setText('請輸入帳號')\n elif code == 7:\n self.ui.status.setText('請輸入密碼')\n\n self.ui.status.adjustSize()\n self.ui.status.setGeometry(QRect(int(250-self.ui.status.width()/2),\n 350, int(self.ui.status.width()), int(self.ui.status.height())))\n\n self.ui.loading.setVisible(False)\n\n def mousePressEvent(self, event):\n self.pressX = event.x()\n self.pressY = event.y()\n\n def mouseMoveEvent(self, event):\n x = event.x()\n y = event.y()\n moveX = x-self.pressX\n moveY = y-self.pressY\n positionX = self.frameGeometry().x() + moveX\n positionY = self.frameGeometry().y() + moveY\n self.move(positionX, positionY)\n\n def svg2pixmap(self, img, size):\n render = QtSvg.QSvgRenderer(img)\n image = QtGui.QImage(size.width(), size.height(),\n QtGui.QImage.Format_ARGB32)\n painter = QtGui.QPainter(image)\n painter.setCompositionMode(QtGui.QPainter.CompositionMode_SourceOver)\n image.fill(Qt.Qt.transparent)\n render.render(painter)\n img = QtGui.QPixmap.fromImage(image)\n del painter\n return img\n\n# 閃屏頁面\n\n\nclass Splash(QWidget):\n\n def __init__(self):\n super(Splash, self).__init__()\n self.initUI()\n self.initSplash()\n\n def initUI(self):\n self.ui = Ui_Splash()\n self.ui.setupUi(self)\n self.setWindowTitle('淡江大學 實習上機系統')\n img = base64.b64decode(tku)\n self.setWindowIcon(QtGui.QIcon(self.svg2pixmap(img, QSize(512, 512))))\n\n def initSplash(self):\n QTimer.singleShot(5000, self.splash)\n\n def splash(self):\n global window\n window.hide()\n window = Login()\n window.show()\n\n def mousePressEvent(self, event):\n self.pressX = event.x()\n self.pressY = event.y()\n\n def mouseMoveEvent(self, event):\n x = event.x()\n y = event.y()\n moveX = x-self.pressX\n moveY = y-self.pressY\n positionX = self.frameGeometry().x() + moveX\n positionY = self.frameGeometry().y() + moveY\n self.move(positionX, positionY)\n\n def svg2pixmap(self, img, size):\n render = QtSvg.QSvgRenderer(img)\n image = QtGui.QImage(size.width(), size.height(),\n QtGui.QImage.Format_ARGB32)\n painter = QtGui.QPainter(image)\n painter.setCompositionMode(QtGui.QPainter.CompositionMode_SourceOver)\n image.fill(Qt.Qt.transparent)\n render.render(painter)\n img = QtGui.QPixmap.fromImage(image)\n del painter\n return img\n\n\n# QThread線程區\nclass ThreadLogin(QThread):\n\n res = pyqtSignal(int)\n\n def __init__(self, stuID, stuPWD):\n super(ThreadLogin, self).__init__()\n self.stuID = stuID\n self.stuPWD = stuPWD\n\n def run(self):\n db = ServerDB()\n code = db.checkAccount(self.stuID, self.stuPWD)\n self.res.emit(code)\n\n\nclass ThreadOnlineComputer(QThread):\n\n res = pyqtSignal(int)\n\n def __init__(self, stuID, seatNum):\n super(ThreadOnlineComputer, self).__init__()\n self.stuID = stuID\n self.seatNum = seatNum\n\n def run(self):\n\n print('online結束')\n #db = ServerDB()\n #resultCode = db.onlineComputer(self.stuID, self.seatNum)\n # self.res.emit(resultCode)\n\n\nclass ThreadOfflineComputer(QThread):\n\n res = pyqtSignal(int)\n\n def __init__(self, stuID, seatNum):\n super(ThreadOfflineComputer, self).__init__()\n self.stuID = stuID\n self.seatNum = seatNum\n\n def run(self):\n\n print('offline結束')\n #db = ServerDB()\n #resultCode = db.offlineComputer(self.stuID, self.seatNum)\n # self.res.emit(resultCode)\n\n\nclass ThreadCleanOnlineComputer(QThread):\n\n res = pyqtSignal(int)\n\n def __init__(self, total):\n super(ThreadCleanOnlineComputer, self).__init__()\n self.total = total\n\n def run(self):\n resultCode = 0\n #db = ServerDB()\n # for i in range(1, 
self.total+1):\n        #     resultcode = db.onlineComputer('維修時間', i)\n        #     self.res.emit(resultcode)\n\n\nclass ThreadGetSeatStatus(QThread):\n\n    res = pyqtSignal(list)\n\n    def __init__(self):\n        super(ThreadGetSeatStatus, self).__init__()\n\n    def run(self):\n        db = ServerDB()\n        seatStatus = db.getSeatStatus()\n        self.res.emit(seatStatus)\n\n\nclass ThreadGetRoomName(QThread):\n    res = pyqtSignal(str)\n\n    def __init__(self):\n        super(ThreadGetRoomName, self).__init__()\n\n    def run(self):\n        db = ServerDB()\n        roomName = db.getClassroomName()\n        self.res.emit(roomName)\n\n\nclass ThreadGetWeather(QThread):\n\n    res = pyqtSignal(str)\n\n    def __init__(self):\n        super(ThreadGetWeather, self).__init__()\n\n    def run(self):\n        apiKey = 'CWB-21FCA0DA-65E9-484E-AAE1-1F03A2D66022'\n\n        url = 'https://opendata.cwb.gov.tw/api/v1/rest/datastore/F-D0047-071' + '?Authorization=' + apiKey + '&limit=' + \\\n            '1' + '&offset=' + '0' + '&locationName=' + '%E6%B7%A1%E6%B0%B4%E5%8D%80' + \\\n            '&elementName=' + 'Wx' + '&sort=' + 'time'\n        response = requests.get(url)\n\n        if response.status_code == 200:\n            response = response.json()\n            weatherDesc = response['records']['locations'][0]['location'][0][\n                'weatherElement'][0]['time'][0]['elementValue'][0]['value']\n            self.res.emit(weatherDesc)\n\n\nif __name__ == '__main__':\n\n    app = QApplication(sys.argv)\n    # app.setWindowIcon(QIcon('./icon/tku.svg'))\n    window = Splash()\n    window.show()\n\n    exit(app.exec_())\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 57672, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "datetime.datetime.now", "line_number": 354, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 354, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 370, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 370, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 423, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 423, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 444, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 444, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 453, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 453, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 793, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 793, "usage_type": "attribute"}, {"api_name": "numpy.delete", "line_number": 801, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 893, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 893, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1114, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1114, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 1517, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 1528, "usage_type": "attribute"}]} +{"seq_id": "423431560", "text": "import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom pymongo import MongoClient # import pymongo (the package must be installed first)\r\nclient = MongoClient('localhost', 27017) # MongoDB runs on port 27017.\r\ndb = client.dbsparta # use the db named 'dbsparta'; 
it is created if it does not exist.\r\n# read the target URL and fetch its HTML\r\nheaders = {\r\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\r\ndata = requests.get('https://www.genie.co.kr/chart/top200?ditc=D&rtm=N&ymd=20200713', headers=headers)\r\n\r\n# parse the HTML with the BeautifulSoup library so it is easy to search\r\nsoup = BeautifulSoup(data.text, 'html.parser')\r\n#body-content > div.newest-list > div > table > tbody > tr:nth-child(1) > td.number\r\n#body-content > div.newest-list > div > table > tbody > tr:nth-child(3) > td.number\r\n#body-content > div.newest-list > div > table > tbody > tr:nth-child(1) > td.info > a.title.ellipsis\r\n#body-content > div.newest-list > div > table > tbody > tr:nth-child(3) > td.info > a.title.ellipsis\r\n#body-content > div.newest-list > div > table > tbody > tr:nth-child(1) > td.info > a.artist.ellipsis\r\n#body-content > div.newest-list > div > table > tbody > tr:nth-child(3) > td.info > a.artist.ellipsis\r\n#body-content > div.newest-list > div > table > tbody > tr:nth-child(1) > td.number > span > span > span > span\r\n\r\nmusics = soup.select('#body-content > div.newest-list > div > table > tbody > tr')\r\n\r\nfor music in musics:\r\n    rank = music.select_one('td.number').text[0:2].strip() # the cell also holds rank-change text; keep only the leading digits\r\n    title = music.select_one('td.info > a.title.ellipsis').text.strip()\r\n    artist = music.select_one('td.info > a.artist.ellipsis').text.strip()\r\n    print(rank, title, artist)\r\n    doc = {\r\n        'rank': rank,\r\n        'title': title,\r\n        'artist': artist\r\n    }\r\n    db.musics_rank.insert_one(doc)", "sub_path": "week3.py", "file_name": "week3.py", "file_ext": "py", "file_size_in_byte": 2173, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pymongo.MongoClient", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "61339487", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, Http404\nimport json\n# Create your views here.\nfrom .models import Track\n\ndef track_view(request, title):\n    track = get_object_or_404(Track, title=title)\n    data = {\n        'title': track.title,\n        'order': track.order,\n        'album': track.album,\n        'artist': {\n            'name': track.artist.first_name+' '+track.artist.last_name,\n            'bio': track.artist.biography,\n        }\n    }\n    json_data = json.dumps(data)\n    return HttpResponse(json_data, content_type='application/json')\n    #return render(request, 'track.html', {'track':track})", "sub_path": "tracks/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 656, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 8, "usage_type": "call"}, {"api_name": "models.Track", "line_number": 8, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 18, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "225443740", "text": "import torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nfrom torch import optim\r\nimport torch.nn.functional as F\r\nfrom torch.nn.backends.thnn import backend # backend.LSTMCell etc...\r\nimport pdb\r\nimport 
random\r\nimport math\r\nfrom torch.nn import init\r\nimport numpy as np\r\n\r\n\r\ndevice = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\r\n\r\nclass RNN(nn.Module):\r\n\r\n def __init__(self, vocab, word2vec_size, embedding_size, infer_size, batch_size, embedding_dim, hidden_size=100, dropout=.1):\r\n super(RNN, self).__init__()\r\n self.batch_size = batch_size\r\n self.hidden_size = hidden_size\r\n self.input_size = hidden_size #LSTM input\r\n\r\n self.word2vec = nn.Embedding.from_pretrained(vocab.word2vector_weights)\r\n\r\n self.embedding = nn.Embedding(embedding_size, embedding_dim, padding_idx=0) #reserve the zero vector for padding\r\n nn.init.uniform_(self.embedding.weight.data[1:,:], -.05, .05)\r\n\r\n self.infer_embedding = nn.Embedding(infer_size, embedding_dim, padding_idx=0)\r\n nn.init.uniform_(self.infer_embedding.weight.data[1:,:], -1.0, 1.0)\r\n self.freezeLayer(self.infer_embedding)\r\n\r\n\r\n self.x_proj = nn.Linear(embedding_dim, self.hidden_size)\r\n self.lstm_p = nn.LSTM(self.input_size, hidden_size)\r\n self.init_parameters(self.lstm_p)\r\n\r\n self.lstm_h = nn.LSTM(self.input_size, hidden_size)\r\n self.init_parameters(self.lstm_h)\r\n\r\n # self.reset_parameters(self.lstm_h)\r\n # The linear layer that maps from hidden state space to tag space\r\n self.i2o = nn.Linear(hidden_size, 3) #only the last hidden state is needed\r\n \r\n self.softmax = nn.LogSoftmax(dim=1)\r\n\r\n def forward(self, sentence_p, sentence_h, h_len, hidden, vocab_size, infer_size):\r\n \"\"\"s\r\n >=vocab_size and 0], 0)\r\n # p_output = self.dp(p_output)\r\n output = self.i2o(F.tanh(p_output))\r\n\r\n output = self.softmax(output)\r\n\r\n return output\r\n\r\n def initHidden(self):\r\n h0 = Variable(torch.zeros(1, self.batch_size, self.hidden_size)).to(device)\r\n c0 = Variable(torch.zeros(1, self.batch_size, self.hidden_size)).to(device)\r\n return (h0, c0)\r\n\r\n def freezeLayer(self, layer):\r\n for param in layer.parameters():\r\n param.requires_grad = False\r\n\r\n def init_parameters(self, model, Xavier=True):\r\n stdv = 1.0 / math.sqrt(model.hidden_size)\r\n if Xavier:\r\n nn.init.xavier_normal_(model.weight_hh_l0)\r\n nn.init.xavier_normal_(model.weight_ih_l0)\r\n model.bias_hh_l0.data.uniform_(-stdv, stdv)\r\n model.bias_ih_l0.data.uniform_(-stdv, stdv)\r\n else:\r\n stdv = 1.0 / math.sqrt(model.hidden_size)\r\n for weight in model.parameters():\r\n weight.data.uniform_(-stdv, stdv)\r\n\r\n def splitEmbedding(self, tensor, bd, level):\r\n embedding_idx = torch.clamp(tensor-bd+1, min=0, max=level)%level # e.g. the +1 frees one slot so the embedding maps out-of-range ids to 0\r\n word2vec_idx = torch.clamp(tensor, max=bd)%bd #replace >=bd by 0\r\n rand_idx = torch.clamp(tensor-bd-level+2, min=0)\r\n return embedding_idx, word2vec_idx, rand_idx\r\n\r\n# class RNN(nn.Module):\r\n# \"\"\"\r\n# Rochtaschel 2015 shared coding\r\n# \"\"\" \r\n# def __init__(self, w2v_embedding_size, embedding_size, pretrain_wts, batch_size, input_size=50, hidden_size=100, dropout=1, word2vec=False):\r\n# super(RNN, self).__init__()\r\n# self.batch_size = batch_size\r\n# self.hidden_size = hidden_size\r\n# if word2vec:\r\n# self.w2v_embedding = nn.Embedding(w2v_embedding_size, input_size)\r\n# self.w2v_embedding.weight.data.copy_(pretrain_wts)\r\n\r\n# self.embedding = nn.Embedding(embedding_size, input_size)\r\n\r\n# self.lstm_p = nn.LSTM(input_size, hidden_size)\r\n# self.lstm_h = nn.LSTM(input_size, hidden_size)\r\n# # The linear layer that maps from hidden state space to tag space\r\n# self.i2o = nn.Linear(hidden_size, 3) #only the last hidden state is needed\r\n# self.softmax = nn.LogSoftmax(dim=1)\r\n\r\n# def forward(self, sentence_p, sentence_h, hidden):\r\n# embedded_p = self.embedding(sentence_p) # 44 × 32 × 50\r\n# embedded_h = self.embedding(sentence_h) # 6 × 32 × 50\r\n\r\n# output_p, (h, c) = self.lstm_p(embedded_p, hidden) # 44 × 32 × 100 seq. × batch × features\r\n# # (h, _) = self.initHidden()\r\n\r\n# output_h, _ = self.lstm_h(embedded_h, (h, c))\r\n# # combined_input = torch.transpose(torch.cat((output_s, output_q), 0), 0, 1)\r\n# # output = self.i2o(combined_input.contiguous().view(32, -1))\r\n\r\n# output = self.i2o(F.tanh(output_h[-1]))\r\n# output = self.softmax(output)\r\n# return output\r\n\r\n# def initHidden(self):\r\n# h0 = Variable(torch.zeros(1, self.batch_size, self.hidden_size))\r\n# c0 = Variable(torch.zeros(1, self.batch_size, self.hidden_size))\r\n# return (h0, c0)\r\n\r\n\r\n# # class LayerNorm(nn.Module):\r\n# # def __init__(self, features, eps=1e-5):\r\n# # super().__init__()\r\n# # # self.gamma = nn.Parameter(torch.ones(features))\r\n# # # self.beta = nn.Parameter(torch.zeros(features))\r\n# # self.gamma = Variable(torch.ones(features))\r\n# # self.beta = Variable(torch.zeros(features))\r\n# # self.eps = eps\r\n\r\n# # def forward(self, x):\r\n# # mean = x.mean(-1, keepdim=True) # -1 means the last dimension \r\n# # std = x.std(-1, keepdim=True)\r\n\r\n# # return self.gamma * (x - mean) / (std + self.eps) + self.beta\r\n\r\n# # class RNN(nn.Module):\r\n# # \"\"\"add generated network based on the Rochtaschel's LSTM\r\n# # for metanetwork:\r\n# # x^_t = [x_t, h_t-1]\r\n# # h^_t \r\n# # arguments: batch_size: data width within a batch for one LSTM cell\r\n# # u_hidden_size: hidden features for upper net (basic net)\r\n# # m_hidden_size: hidden features for meta net\r\n# # output_size: output features for upper net\r\n# # \"\"\"\r\n# # def __init__(self, embedding_size, batch_size, u_hidden_size, m_hidden_size, output_size):\r\n# # super(RNN, self).__init__()\r\n# # combined_mlp_size = embedding_size + u_hidden_size # for [x^_t h^_t-1]\r\n\r\n# # self.ln = LayerNorm(u_hidden_size)\r\n# # self.lstm = nn.LSTMCell(self.combined_mlp_size, m_hidden_size)\r\n# # self.weight_h = torch.Tensor(self.batch_size, self.combined_mlp_size, u_hidden_size)\r\n# # self.layer_weights_hh_a = F.sigmoid(nn.Linear(m_hidden_size, combined_mlp_size))\r\n# # self.layer_weights_hh_b = F.sigmoid(nn.Linear(m_hidden_size, m_hidden_size))\r\n# # self.layer_weights_ho_a = F.sigmoid(nn.Linear(m_hidden_size, combined_mlp_size))\r\n# # self.layer_weights_ho_b = F.sigmoid(nn.Linear(m_hidden_size, 
m_hidden_size))\r\n# # self.layer_weights_hi_a = F.sigmoid(nn.Linear(m_hidden_size, combined_mlp_size))\r\n# # self.layer_weights_hi_b = F.sigmoid(nn.Linear(m_hidden_size, m_hidden_size))\r\n# # self.layer_weights_hc_a = F.sigmoid(nn.Linear(m_hidden_size, combined_mlp_size))\r\n# # self.layer_weights_hc_b = F.sigmoid(nn.Linear(m_hidden_size, m_hidden_size))\r\n \r\n# # self.layer_shifts_hh = F.sigmoid(nn.Linear(m_hidden_size, m_hidden_size))\r\n# # self.layer_shifts_ho = F.sigmoid(nn.Linear(m_hidden_size, m_hidden_size))\r\n# # self.layer_shifts_hi = F.sigmoid(nn.Linear(m_hidden_size, m_hidden_size))\r\n# # self.layer_shifts_hc = F.sigmoid(nn.Linear(m_hidden_size, m_hidden_size))\r\n\r\n# # torch.nn.init.uniform_(self.fw, -.01, .01)\r\n\r\n# # def forward(self, data, programme, u_hidden, m_hidden):\r\n# # #u_hidden = uh0, uc0 m_hidden = mh0, mc0, meta1([x1, uh0], (mh0, mc0)) -> (mh1, mc1) basic1(x1, (uh0, uc0)) -> (uh1, mh1)\r\n# # embedded = self.Embedding(data) # 44 × 32 × 50\r\n# # for i in range(embedded.size()[0]):\r\n# # m_h, m_c = self.lstm(torch.cat((embedded[i], u_hidden[0]), 1), m_hidden) #Inputs: input, (h_0, c_0)\r\n# # m_hidden = (m_h, m_c)\r\n\r\n# # whh_a = self.layer_weights_hh_a(m_h)\r\n# # whh_b = self.layer_weights_hh_b(m_h)\r\n# # who_a = self.layer_weights_ho_a(m_h)\r\n# # who_b = self.layer_weights_ho_b(m_h)\r\n# # whi_a = self.layer_weights_hi_a(m_h)\r\n# # whi_b = self.layer_weights_hi_b(m_h)\r\n# # whc_a = self.layer_weights_hc_a(m_h)\r\n# # whc_b = self.layer_weights_hc_b(m_h)\r\n \r\n# # weight_hh = whh_a * whh_b\r\n# # weight_ho = who_a * who_b\r\n# # weight_hi = whi_a * whi_b\r\n# # weight_hc = whc_a * whc_b\r\n\r\n# # shh = self.layer_shifts_hh(m_h)\r\n# # sho = self.layer_shifts_ho(m_h)\r\n# # shi = self.layer_shifts_hi(m_h)\r\n# # shc = self.layer_shifts_hc(m_h)\r\n\r\n# # *u = backend.LSTMCell(embedded[i], u_hidden, )\r\n# # u_hidden = *u\r\n\r\n\r\n\r\n\r\n\r\n \r\n# # self.fw = self.gate(m_h, self.combined_mlp_size, self.m_hidden_size, self.fw) #alpha_gamma = combined_mlp_size, beta_delta = m_hidden_size\r\n\r\n# # def gate(self, input_weights, alpha_gamma, beta_delta, fast_weights): #update F1, F2\r\n# # alpha = torch.unsqueeze(input_weights[:, :alpha_gamma], 2)\r\n# # beta = torch.unsqueeze(input_weights[:, alpha_gamma:alpha_gamma + beta_delta], 1)\r\n# # gamma = torch.unsqueeze(input_weights[:, alpha_gamma + beta_delta:2*alpha_gamma + beta_delta], 2)\r\n# # delta = torch.unsqueeze(input_weights[:, 2*alpha_gamma + beta_delta:], 1)\r\n\r\n# # H = F.tanh(alpha) * F.tanh(beta)\r\n# # T = F.sigmoid(gamma) * F.sigmoid(delta)\r\n# # fast_weights = T.mul(H) + torch.mul(Variable(torch.ones(alpha_gamma, beta_delta)) - T, fast_weights)\r\n\r\n# # return fast_weights\r\n", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 11308, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "torch.device", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Embedding.from_pretrained", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 24, 
"usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.init.uniform_", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.init.uniform_", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn.functional.tanh", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 84, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 95, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "142889946", "text": "#! 
python3\n# -*- coding: utf-8 -*-\nimport json\nimport datetime as dt\nimport re\nimport os\nimport logging\n\n\ndef get_auth_data():\n print(os.getcwd())\n with open('app/auth', 'rt') as auth_file:\n data = json.loads(auth_file.read())\n return data\n\n\ndef get_cur_time_filename():\n now = dt.datetime.today().timetuple()\n now = '{0}_{1}_{2}_{3}_{4}_{5}'.format(str(now.tm_year), str(now.tm_mon), str(now.tm_mday), str(now.tm_hour),\n str(now.tm_min), str(now.tm_sec))\n return now\n\n\ndef logger_init(log_name_obj=None, prefix=None):\n re_check = re.compile(u'[.]log')\n if log_name_obj is None:\n log_name_obj = get_cur_time_filename()\n\n if not re.search(re_check, log_name_obj):\n log_name_obj += '.log'\n\n if prefix is not None:\n log_name_obj = prefix + log_name_obj\n\n dir_name = str(os.path.dirname(os.path.abspath(__file__)))\n if not os.path.exists(dir_name + '/logs/'):\n os.makedirs(dir_name + '/logs/')\n\n log_name = dir_name + '/logs/{0}'.format(log_name_obj)\n logging.basicConfig(format=u'%(levelname)s [%(asctime)s] : %(message)s', filename=log_name, level=logging.INFO)\n try:\n my_logger_obj = logging.getLogger('{0} logger'.format(log_name_obj))\n return my_logger_obj\n except Exception as e:\n print('Logger init failure. Details: ', e)\n\n\nlogger = logger_init('main')\n", "sub_path": "app/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.getcwd", "line_number": 11, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 25, "usage_type": "call"}, {"api_name": "re.search", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 40, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "420514028", "text": "# SPDX-License-Identifier: BSD-3-Clause\n# Copyright Contributors to the OpenColorIO Project.\n\"\"\"\nOpenColorIO Config Generation Common Objects\n============================================\n\nDefines various objects related to *OpenColorIO* config generation:\n\n- :func:`opencolorio_config_aces.colorspace_factory`\n- :class:`opencolorio_config_aces.ConfigData`\n- :func:`opencolorio_config_aces.validate_config`\n- :func:`opencolorio_config_aces.generate_config`\n\"\"\"\n\nimport logging\n\nfrom opencolorio_config_aces.utilities import required, is_iterable\n\n__author__ = 'OpenColorIO Contributors'\n__copyright__ = 'Copyright Contributors to the OpenColorIO Project.'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'OpenColorIO Contributors'\n__email__ = 'ocio-dev@lists.aswf.io'\n__status__ = 'Production'\n\n__all__ = [\n 
'LOG_ALLOCATION_VARS', 'colorspace_factory', 'ConfigData',\n 'validate_config', 'generate_config'\n]\n\nLOG_ALLOCATION_VARS = (-8, 5, 2**-8)\n\"\"\"\nAllocation variables for logarithmic data representation.\n\nLOG_ALLOCATION_VARS : tuple\n\"\"\"\n\n\n@required('OpenColorIO')\ndef colorspace_factory(name,\n family=None,\n encoding=None,\n categories=None,\n description=None,\n equality_group=None,\n bit_depth=None,\n allocation=None,\n allocation_vars=None,\n to_reference_transform=None,\n from_reference_transform=None,\n is_data=None,\n base_colorspace=None):\n \"\"\"\n *OpenColorIO* colorspace factory.\n\n Parameters\n ----------\n name : unicode\n *OpenColorIO* colorspace name.\n family : unicode, optional\n *OpenColorIO* colorspace family.\n encoding : unicode, optional\n *OpenColorIO* colorspace encoding.\n categories : array_like, optional\n *OpenColorIO* colorspace categories.\n description : unicode, optional\n *OpenColorIO* colorspace description.\n equality_group : unicode, optional\n *OpenColorIO* colorspace equality_group.\n bit_depth : int, optional\n *OpenColorIO* colorspace bit depth.\n allocation : int, optional\n *OpenColorIO* colorspace allocation type.\n allocation_vars : tuple, optional\n *OpenColorIO* colorspace allocation variables.\n to_reference_transform : object, optional\n *To Reference* *OpenColorIO* colorspace transform.\n from_reference_transform : object, optional\n *From Reference* *OpenColorIO* colorspace transform.\n is_data : bool, optional\n Whether the colorspace represents data.\n base_colorspace : ColorSpace, optional\n *OpenColorIO* base colorspace inherited for bit depth, allocation,\n allocation variables, and to/from reference transforms.\n\n Returns\n -------\n ColorSpace\n *OpenColorIO* colorspace.\n \"\"\"\n\n import PyOpenColorIO as ocio\n\n if categories is None:\n categories = []\n\n if bit_depth is None:\n bit_depth = ocio.BIT_DEPTH_F32\n\n if base_colorspace is not None:\n colorspace = base_colorspace\n else:\n colorspace = ocio.ColorSpace()\n\n colorspace.setBitDepth(bit_depth)\n\n if allocation is not None:\n colorspace.setAllocation(allocation)\n\n if allocation_vars is not None:\n colorspace.setAllocationVars(allocation_vars)\n\n if to_reference_transform is not None:\n colorspace.setTransform(to_reference_transform,\n ocio.COLORSPACE_DIR_TO_REFERENCE)\n\n if from_reference_transform is not None:\n colorspace.setTransform(from_reference_transform,\n ocio.COLORSPACE_DIR_FROM_REFERENCE)\n\n colorspace.setName(name)\n\n if family is not None:\n colorspace.setFamily(family)\n\n if encoding is not None:\n colorspace.setEncoding(encoding)\n\n for category in categories:\n colorspace.addCategory(category)\n\n if description is not None:\n colorspace.setDescription(description)\n\n if equality_group is not None:\n colorspace.setEqualityGroup(equality_group)\n\n if is_data is not None:\n colorspace.setIsData(is_data)\n\n return colorspace\n\n\nclass ConfigData:\n \"\"\"\n Defines the data container for an *OpenColorIO* config.\n\n Parameters\n ----------\n roles : dict\n Config roles, a dict of role and colorspace name.\n colorspaces : array_like\n Config colorspaces, an iterable of\n :attr:`PyOpenColorIO.ColorSpace` class instances.\n views : array_like\n Config views, an iterable of dicts of display, view\n and colorspace names.\n active_displays : array_like\n Config active displays, an iterable of display names.\n active_views : array_like, optional\n Config active displays, an iterable of view names.\n file_rules : array_like, optional\n 
Config file rules, a dict of file rules.\n viewing_rules : array_like, optional\n Config viewing rules, a dict of viewing rules.\n description : unicode, optional\n Config description.\n profile_version : int, optional\n Config major version, i.e. 1 or 2.\n\n Attributes\n ----------\n roles\n colorspaces\n views\n active_displays\n active_views\n file_rules\n viewing_rules\n description\n profile_version\n \"\"\"\n\n def __init__(self,\n roles,\n colorspaces,\n views,\n active_displays,\n active_views=None,\n file_rules=None,\n viewing_rules=None,\n description=None,\n profile_version=None):\n self._roles = {}\n self.roles = roles\n self._colorspaces = []\n self.colorspaces = colorspaces\n self._views = []\n self.views = views\n self._active_displays = []\n self.active_displays = active_displays\n self._active_views = []\n self.active_views = active_views\n self._file_rules = []\n self.file_rules = file_rules\n self._viewing_rules = []\n self.viewing_rules = viewing_rules\n self._description = None\n self.description = description\n self._profile_version = 1\n self.profile_version = profile_version\n\n @property\n def roles(self):\n \"\"\"\n Getter and setter property for the *OpenColorIO* config roles.\n\n Parameters\n ----------\n value : dict\n Attribute value.\n\n Returns\n -------\n dict\n *OpenColorIO* config roles.\n \"\"\"\n\n return self._roles\n\n @roles.setter\n def roles(self, value):\n \"\"\"\n Setter for **self.roles** property.\n \"\"\"\n\n if value is not None:\n assert isinstance(value, dict), ((\n '\"{0}\" attribute: \"{1}\" is not a \"dict\" like object!').format(\n 'roles', value))\n self._roles = dict(value)\n\n @property\n def colorspaces(self):\n \"\"\"\n Getter and setter property for the *OpenColorIO* config colorspaces.\n\n Parameters\n ----------\n value : array_like\n Attribute value.\n\n Returns\n -------\n list\n *OpenColorIO* config colorspaces.\n \"\"\"\n\n return self._colorspaces\n\n @colorspaces.setter\n def colorspaces(self, value):\n \"\"\"\n Setter for **self.colorspaces** property.\n \"\"\"\n\n if value is not None:\n assert is_iterable(value), (\n ('\"{0}\" attribute: \"{1}\" is not an \"array_like\" like object!'\n ).format('colorspaces', value))\n self._colorspaces = list(value)\n\n @property\n def views(self):\n \"\"\"\n Getter and setter property for the *OpenColorIO* config views.\n\n Parameters\n ----------\n value : array_like\n Attribute value.\n\n Returns\n -------\n list\n *OpenColorIO* config views.\n \"\"\"\n\n return self._views\n\n @views.setter\n def views(self, value):\n \"\"\"\n Setter for **self.views** property.\n \"\"\"\n\n if value is not None:\n assert is_iterable(value), (\n ('\"{0}\" attribute: \"{1}\" is not an \"array_like\" like object!'\n ).format('views', value))\n self._views = list(value)\n\n @property\n def active_displays(self):\n \"\"\"\n Getter and setter property for the *OpenColorIO* config active\n displays.\n\n Parameters\n ----------\n value : array_like\n Attribute value.\n\n Returns\n -------\n list\n *OpenColorIO* config active displays.\n \"\"\"\n\n return self._active_displays\n\n @active_displays.setter\n def active_displays(self, value):\n \"\"\"\n Setter for **self.active_displays** property.\n \"\"\"\n\n if value is not None:\n assert is_iterable(value), (\n ('\"{0}\" attribute: \"{1}\" is not an \"array_like\" like object!'\n ).format('active_displays', value))\n self._active_displays = list(value)\n\n @property\n def active_views(self):\n \"\"\"\n Getter and setter property for the *OpenColorIO* 
config active views.\n\n Parameters\n ----------\n value : array_like\n Attribute value.\n\n Returns\n -------\n list\n *OpenColorIO* config active views.\n \"\"\"\n\n return self._active_views\n\n @active_views.setter\n def active_views(self, value):\n \"\"\"\n Setter for **self.active_views** property.\n \"\"\"\n\n if value is not None:\n assert is_iterable(value), (\n ('\"{0}\" attribute: \"{1}\" is not an \"array_like\" like object!'\n ).format('active_views', value))\n self._active_views = list(value)\n\n @property\n def file_rules(self):\n \"\"\"\n Getter and setter property for the *OpenColorIO* config file rules.\n\n Parameters\n ----------\n value : array_like\n Attribute value.\n\n Returns\n -------\n list\n *OpenColorIO* config file rules.\n \"\"\"\n\n return self._file_rules\n\n @file_rules.setter\n def file_rules(self, value):\n \"\"\"\n Setter for **self.file_rules** property.\n \"\"\"\n\n if value is not None:\n assert is_iterable(value), (\n ('\"{0}\" attribute: \"{1}\" is not an \"array_like\" like object!'\n ).format('file_rules', value))\n self._file_rules = list(value)\n\n @property\n def viewing_rules(self):\n \"\"\"\n Getter and setter property for the *OpenColorIO* config viewing rules.\n\n Parameters\n ----------\n value : array_like\n Attribute value.\n\n Returns\n -------\n list\n *OpenColorIO* config viewing rules.\n \"\"\"\n\n return self._viewing_rules\n\n @viewing_rules.setter\n def viewing_rules(self, value):\n \"\"\"\n Setter for **self.viewing_rules** property.\n \"\"\"\n\n if value is not None:\n assert is_iterable(value), (\n ('\"{0}\" attribute: \"{1}\" is not an \"array_like\" like object!'\n ).format('viewing_rules', value))\n self._viewing_rules = list(value)\n\n @property\n def description(self):\n \"\"\"\n Getter and setter property for the *OpenColorIO* config description.\n\n Parameters\n ----------\n value : unicode\n Attribute value.\n\n Returns\n -------\n unicode\n *OpenColorIO* config description.\n \"\"\"\n\n return self._description\n\n @description.setter\n def description(self, value):\n \"\"\"\n Setter for **self.description** property.\n \"\"\"\n\n if value is not None:\n assert isinstance(value, str), ((\n '\"{0}\" attribute: \"{1}\" is not a \"str\" like object!').format(\n 'description', value))\n self._description = value\n\n @property\n def profile_version(self):\n \"\"\"\n Getter and setter property for the *OpenColorIO* config profile\n version.\n\n Parameters\n ----------\n value : int\n Attribute value.\n\n Returns\n -------\n int\n *OpenColorIO* config profile version.\n \"\"\"\n\n return self._profile_version\n\n @profile_version.setter\n def profile_version(self, value):\n \"\"\"\n Setter for **self.profile_version** property.\n \"\"\"\n\n if value is not None:\n assert isinstance(value, int), ((\n '\"{0}\" attribute: \"{1}\" is not an \"int\" like object!').format(\n 'profile_version', value))\n self._profile_version = int(value)\n\n\ndef validate_config(config):\n \"\"\"\n Validates given *OpenColorIO* config.\n\n Parameters\n ----------\n config : Config\n *OpenColorIO* config to validate.\n\n Returns\n -------\n bool\n Whether the *OpenColorIO* config is valid.\n \"\"\"\n\n try:\n config.validate()\n return True\n except Exception as error:\n logging.critical(error)\n return False\n\n\n@required('OpenColorIO')\ndef generate_config(data, config_name=None, validate=True):\n \"\"\"\n Generates the *OpenColorIO* config from given data.\n\n Parameters\n ----------\n data : ConfigData\n *OpenColorIO* config data.\n config_name 
: unicode, optional\n *OpenColorIO* config file name, if given the config will be written to\n disk.\n validate : bool, optional\n Whether to validate the config.\n\n Returns\n -------\n Config\n *OpenColorIO* config.\n \"\"\"\n\n import PyOpenColorIO as ocio\n\n config = ocio.Config()\n config.setMajorVersion(data.profile_version)\n\n if data.description is not None:\n config.setDescription(data.description)\n\n for colorspace, role in data.roles.items():\n logging.debug(f'Adding \"{colorspace}\" colorspace as \"{role}\" role.')\n config.setRole(role, colorspace)\n\n for colorspace in data.colorspaces:\n logging.debug(f'Adding colorspace \"{colorspace.getName()}\".')\n config.addColorSpace(colorspace)\n\n for view in data.views:\n display = view['display']\n view_name = view['view']\n colorspace = view.get('colorspace')\n looks = view.get('looks')\n view_transform = view.get('view_transform')\n rule = view.get('rule')\n description = view.get('description')\n if colorspace:\n logging.debug(f'Adding \"{view_name}\" view to \"{display}\" display '\n f'using \"{colorspace}\" colorspace.')\n\n config.addDisplayView(display, view_name, colorspace, looks)\n else:\n logging.debug(f'Adding \"{view_name}\" view to \"{display}\" display '\n f'using \"{view_transform}\" view_transform, '\n f'\"{rule}\" rule and \"{description}\" description.')\n\n config.addDisplayView(display, view_name, view_transform, looks,\n rule, description)\n\n config.setActiveDisplays(','.join(data.active_displays))\n config.setActiveViews(','.join(data.active_views))\n\n file_rules = ocio.FileRules()\n rule_index = 0\n for file_rule in reversed(data.file_rules):\n name = file_rule['name']\n colorspace = file_rule['colorspace']\n regex = file_rule.get('regex')\n pattern = file_rule.get('pattern')\n extension = file_rule.get('extension')\n if name == 'Default':\n logging.debug(f'Setting \"{name}\" file rule with '\n f'\"{colorspace}\" colorspace.')\n file_rules.setDefaultRuleColorSpace(colorspace)\n elif regex:\n logging.debug(f'Adding \"{name}\" file rule with '\n f'\"{regex}\" regex pattern for '\n f'\"{colorspace}\" colorspace.')\n file_rules.insertRule(rule_index, name, colorspace, regex)\n rule_index += 1\n else:\n logging.debug(f'Adding \"{name}\" file rule with '\n f'\"{pattern}\" pattern and \"{extension}\" extension '\n f'for \"{colorspace}\" colorspace.')\n file_rules.insertRule(rule_index, name, colorspace, pattern,\n extension)\n rule_index += 1\n config.setFileRules(file_rules)\n\n viewing_rules = ocio.ViewingRules()\n for i, viewing_rule in enumerate(reversed(data.viewing_rules)):\n logging.warning('Inserting a viewing rule is not supported yet!')\n # viewing_rules.insertRule()\n config.setViewingRules(viewing_rules)\n\n if validate:\n validate_config(config)\n\n if config_name is not None:\n with open(config_name, 'w') as file:\n file.write(config.serialize())\n\n return config\n\n\nif __name__ == '__main__':\n required('OpenColorIO')(lambda: None)()\n\n import os\n import opencolorio_config_aces\n import PyOpenColorIO as ocio\n\n build_directory = os.path.join(opencolorio_config_aces.__path__[0], '..',\n 'build')\n\n if not os.path.exists(build_directory):\n os.makedirs(build_directory)\n\n logging.basicConfig()\n logging.getLogger().setLevel(logging.INFO)\n\n # \"OpenColorIO 1\" configuration.\n colorspace_1 = colorspace_factory('Gamut - sRGB', 'Gamut')\n\n colorspace_2 = colorspace_factory(\n 'CCTF - sRGB',\n 'CCTF',\n description=('WARNING: The sRGB \"EOTF\" is purposely incorrect and '\n 'only a 
placeholder!'),\n to_reference_transform=ocio.ExponentTransform([2.2, 2.2, 2.2, 1]))\n\n colorspace_3 = colorspace_factory(\n 'Colorspace - sRGB',\n 'Colorspace',\n to_reference_transform=ocio.ColorSpaceTransform(\n 'CCTF - sRGB', 'Gamut - sRGB'))\n\n colorspace_4 = colorspace_factory(\n 'View - sRGB Monitor - sRGB', 'View', base_colorspace=colorspace_3)\n\n colorspace_5 = colorspace_factory('Utility - Raw', 'Utility', is_data=True)\n\n data = ConfigData(\n roles={'Gamut - sRGB': ocio.ROLE_SCENE_LINEAR},\n colorspaces=[\n colorspace_1, colorspace_2, colorspace_3, colorspace_4,\n colorspace_5\n ],\n views=[\n {\n 'display': 'sRGB Monitor',\n 'view': 'sRGB - sRGB',\n 'colorspace': 'View - sRGB Monitor - sRGB'\n },\n {\n 'display': 'sRGB Monitor',\n 'view': 'Raw',\n 'colorspace': 'Utility - Raw'\n },\n ],\n active_displays=['sRGB Monitor'],\n active_views=['sRGB - sRGB'],\n )\n\n generate_config(data, os.path.join(build_directory, 'config-v1.ocio'))\n\n # \"OpenColorIO 2\" configuration.\n data.profile_version = 2\n transform = ocio.ExponentWithLinearTransform()\n transform.setGamma([2.4, 2.4, 2.4, 1])\n transform.setOffset([0.055, 0.055, 0.055, 0])\n data.colorspaces[1].setTransform(transform,\n ocio.COLORSPACE_DIR_TO_REFERENCE)\n data.colorspaces[1].setDescription('')\n\n # TODO: Use new display colorspace system.\n data.views = [\n {\n 'display': 'sRGB Monitor',\n 'view': 'sRGB - sRGB',\n 'colorspace': 'View - sRGB Monitor - sRGB'\n },\n {\n 'display': 'sRGB Monitor',\n 'view': 'Raw',\n 'colorspace': 'Utility - Raw'\n },\n ]\n data.file_rules = [\n {\n 'name': 'Default',\n 'colorspace': 'Gamut - sRGB'\n },\n {\n 'name': 'Linear - sRGB',\n 'colorspace': 'Gamut - sRGB',\n 'regex': '_[sS][rR][gG][bB]\\\\.([eE][xX][rR]|[hH][dD][rR])$'\n },\n {\n 'name': 'EOTF - sRGB',\n 'colorspace': 'CCTF - sRGB',\n 'regex': '_[sS][rR][gG][bB]\\\\.([pP][nN][gG]|[tT][iI][fF])$'\n },\n ]\n data.viewing_rules = []\n\n generate_config(data, os.path.join(build_directory, 'config-v2.ocio'))\n", "sub_path": "opencolorio_config_aces/config/generation/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 20416, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "PyOpenColorIO.BIT_DEPTH_F32", "line_number": 98, "usage_type": "attribute"}, {"api_name": "PyOpenColorIO.ColorSpace", "line_number": 103, "usage_type": "call"}, {"api_name": "PyOpenColorIO.COLORSPACE_DIR_TO_REFERENCE", "line_number": 115, "usage_type": "attribute"}, {"api_name": "PyOpenColorIO.COLORSPACE_DIR_FROM_REFERENCE", "line_number": 119, "usage_type": "attribute"}, {"api_name": "opencolorio_config_aces.utilities.required", "line_number": 39, "usage_type": "call"}, {"api_name": "opencolorio_config_aces.utilities.is_iterable", "line_number": 268, "usage_type": "call"}, {"api_name": "opencolorio_config_aces.utilities.is_iterable", "line_number": 298, "usage_type": "call"}, {"api_name": "opencolorio_config_aces.utilities.is_iterable", "line_number": 329, "usage_type": "call"}, {"api_name": "opencolorio_config_aces.utilities.is_iterable", "line_number": 359, "usage_type": "call"}, {"api_name": "opencolorio_config_aces.utilities.is_iterable", "line_number": 389, "usage_type": "call"}, {"api_name": "opencolorio_config_aces.utilities.is_iterable", "line_number": 419, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 505, "usage_type": "call"}, {"api_name": "PyOpenColorIO.Config", "line_number": 532, "usage_type": "call"}, {"api_name": 
"logging.debug", "line_number": 539, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 543, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 555, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 560, "usage_type": "call"}, {"api_name": "PyOpenColorIO.FileRules", "line_number": 570, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 579, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 583, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 589, "usage_type": "call"}, {"api_name": "PyOpenColorIO.ViewingRules", "line_number": 597, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 599, "usage_type": "call"}, {"api_name": "opencolorio_config_aces.utilities.required", "line_number": 509, "usage_type": "call"}, {"api_name": "opencolorio_config_aces.utilities.required", "line_number": 614, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 620, "usage_type": "call"}, {"api_name": "os.path", "line_number": 620, "usage_type": "attribute"}, {"api_name": "opencolorio_config_aces.__path__", "line_number": 620, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 623, "usage_type": "call"}, {"api_name": "os.path", "line_number": 623, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 624, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 626, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 627, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 627, "usage_type": "attribute"}, {"api_name": "PyOpenColorIO.ExponentTransform", "line_number": 637, "usage_type": "call"}, {"api_name": "PyOpenColorIO.ColorSpaceTransform", "line_number": 642, "usage_type": "call"}, {"api_name": "PyOpenColorIO.ROLE_SCENE_LINEAR", "line_number": 651, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 672, "usage_type": "call"}, {"api_name": "os.path", "line_number": 672, "usage_type": "attribute"}, {"api_name": "PyOpenColorIO.ExponentWithLinearTransform", "line_number": 676, "usage_type": "call"}, {"api_name": "PyOpenColorIO.COLORSPACE_DIR_TO_REFERENCE", "line_number": 680, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 714, "usage_type": "call"}, {"api_name": "os.path", "line_number": 714, "usage_type": "attribute"}]} +{"seq_id": "199804051", "text": "import speedtest as sp\nimport datetime\nimport requests\nimport pycountry\n\n\ndef speed_test():\n\n # Do the speed test\n s = sp.Speedtest()\n s.download()\n s.upload()\n\n # Get the results\n res = s.results.dict()\n return res\n\n\ndef build_body(res, user, device):\n body = { \n \"user\": user,\n \"device\": device,\n \"timestamp\": int(datetime.datetime.now().timestamp()),\n \"data\": {\n \"speeds\": {\n \"download\": round(res['download']),\n \"upload\": round(res['upload']),\n },\n \"client\": {\n \"ip\": res['client']['ip'],\n \"lat\": res['client']['lat'],\n \"lon\": res['client']['lon'],\n \"isp\": res['client']['isp'],\n \"country\": res['client']['country'],\n },\n \"server\": {\n \"host\": res['server']['host'],\n \"lat\": res['server']['lat'],\n \"lon\": res['server']['lon'],\n \"country\": pycountry.countries.get(name=res['server']['country'], default=\"Unknown Country\").alpha_2,\n \"distance\": round(res['server']['d'], 4),\n \"ping\": res['ping'],\n \"id\": res['server']['id'],\n }\n }\n }\n return body\n\n\ndef post_to_api(body, api_url):\n headers = {'Content-type': 
'application/json', 'Accept': 'text/plain'}\n r = requests.post(api_url, json=body, headers=headers)\n \n # Print the status code of the response.\n print(r.status_code)\n\n\ndef main():\n\n # Main user (yourself)\n user = 'sfl' # Replace with your username\n api_url = \"https://speedtest-api-raxcsdlzxq-ew.a.run.app/speedtest\" # Replace with your API URL\n\n # Do speed test\n raw_res = speed_test()\n\n # Format body to API spec\n body = build_body(raw_res, user, 1)\n\n # Post to API\n post_to_api(body, api_url)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "python/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1931, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "speedtest.Speedtest", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pycountry.countries.get", "line_number": 40, "usage_type": "call"}, {"api_name": "pycountry.countries", "line_number": 40, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "172397350", "text": "import sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QFileDialog\nimport pyqtgraph as pg\nfrom pyqtgraph import Point\nfrom pyqtgraph.graphicsItems.ROI import ROI\nfrom scipy import interpolate\nfrom scipy.spatial.distance import cdist\nfrom dataclasses import dataclass, astuple\nfrom pharynx_redox.gui.qt_py_files.spline_editor import Ui_Form\nimport numpy as np\n\n\nclass Spline:\n def __init__(self, ctrl_pts=[], n_points=500):\n \"\"\"\n Initialize the Spline object\n \n Parameters\n ----------\n ctrl_pts : list, optional\n the list of points that this spline will interpolate between, by default []\n n_points : int, optional\n the number of points under which to evaluate the spline, by default 500\n \"\"\"\n self._spl = None\n self.ctrl_pts = []\n\n self.add_ctrl_pts(ctrl_pts)\n\n def add_ctrl_pts(self, ctrl_pts):\n for (x, y) in ctrl_pts:\n self.ctrl_pts.append(Point(x, y))\n self._update_spline()\n\n def set_ctrl_pts(self, pos):\n self.ctrl_pts = []\n self.add_ctrl_pts(pos)\n\n def _update_spline(self):\n xys = np.asarray(list(map(list, self.ctrl_pts)))\n if len(xys) > 0:\n self._spl = interpolate.Akima1DInterpolator(\n np.linspace(0, 1, len(xys)), xys\n )\n\n def __call__(self, n=500):\n \"\"\"\n evaluate the spline\n \n Parameters\n ----------\n n : int, optional\n the number of evenly spaced sample points used to evaluate the spline; the sample grid 
must be [0,1]\n \n Returns\n -------\n np.ndarray\n a Mx2 numpy array where \n M[:, 0] = x\n M[:, 1] = y\n \"\"\"\n xs = np.linspace(0, 1, n)\n return self._spl(xs)\n\n\nclass SplineROI(pg.GraphicsObject):\n\n sigClicked = QtCore.Signal(object)\n\n def __init__(self, pos, *args, pen=\"default\", **kwargs):\n super(SplineROI, self).__init__(*args, **kwargs)\n\n self.dragPoint = None\n self.dragOffset = None\n\n self.ctrl_pts = pos\n\n self.spl = Spline(ctrl_pts=pos)\n\n self.scatter = pg.ScatterPlotItem(pxMode=True)\n self.scatter.setParentItem(self)\n self.scatter.sigClicked.connect(self.clicked)\n\n self.picture = None\n self.pen = pen\n self.setData(ctrl_pts=pos)\n\n self._mouseShape = None\n\n def _update(self):\n self.picture = None\n self.prepareGeometryChange()\n self.mouseShape()\n self.update()\n\n def setData(self, ctrl_pts):\n self.ctrl_pts = ctrl_pts\n self.scatter.setData(pos=ctrl_pts)\n self.spl.set_ctrl_pts(pos=ctrl_pts)\n self.informViewBoundsChanged()\n self._update()\n\n def mouseDoubleClickEvent(self, ev):\n if self.mouseShape().contains(ev.pos()):\n ev.accept()\n self.sigClicked.emit(self)\n\n click_pos = np.asarray([ev.pos().x(), ev.pos().y()])\n crv_data = self.spl(n=1000)\n\n ctrl_pts_passed = 0\n epsilon = 1\n\n # make a copy so we can mutate it without losing ctrl pts in the app\n _ctrl_pts = self.ctrl_pts.copy()\n\n # We find the closest point on the curve to where we clicked\n closest_crv_pt_idx = np.argmin(cdist([click_pos,], crv_data))\n\n # and move along the curve until we get to that point\n # all the while, we are interested in how many control points we pass\n for crv_pt in crv_data[:closest_crv_pt_idx, :]:\n ctrl_pt = _ctrl_pts[0]\n if cdist([crv_pt,], [ctrl_pt])[0] <= epsilon:\n # we have reached a control point\n ctrl_pts_passed += 1\n\n # remove that ctrl point from future considerations (because there\n # are likely many curve points that are close to the curve point we\n # just passed)\n _ctrl_pts.pop(0)\n\n self.ctrl_pts.insert(ctrl_pts_passed, tuple(click_pos))\n self.setData(self.ctrl_pts)\n\n def mouseDragEvent(self, ev):\n if ev.button() != QtCore.Qt.LeftButton:\n ev.ignore()\n return\n\n if ev.isStart():\n # We are already one step into the drag.\n # Find the point(s) at the mouse cursor when the button was first\n # pressed:\n pos = ev.buttonDownPos()\n pts = self.scatter.pointsAt(pos)\n if len(pts) == 0:\n ev.ignore()\n return\n self.dragPoint = pts[0]\n ind = pts[0]._index\n self.dragOffset = self.ctrl_pts[ind] - pos\n elif ev.isFinish():\n self.dragPoint = None\n return\n else:\n if self.dragPoint is None:\n ev.ignore()\n return\n\n ind = self.dragPoint._index\n self.ctrl_pts[ind] = tuple(ev.pos() + self.dragOffset)\n self.setData(ctrl_pts=self.ctrl_pts)\n ev.accept()\n\n def remove_point(self, idx):\n self.ctrl_pts.pop(idx)\n self.setData(self.ctrl_pts)\n\n def clicked(self, pts):\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n if modifiers == QtCore.Qt.ControlModifier:\n idx_clicked = pts.ptsClicked[0]._index\n if len(self.ctrl_pts) > 2:\n self.remove_point(idx_clicked)\n\n def generatePicture(self):\n self.picture = QtGui.QPicture()\n if self.pen is None or self.pos is None:\n return\n\n p = QtGui.QPainter(self.picture)\n try:\n xs = self.spl()[:, 0]\n ys = self.spl()[:, 1]\n pen = self.pen\n if pen == \"default\":\n pen = pg.getConfigOption(\"foreground\")\n p.setPen(pg.functions.mkPen(color=\"w\", width=3))\n path = pg.functions.arrayToQPath(x=xs, y=ys, connect=\"all\")\n p.drawPath(path)\n finally:\n p.end()\n\n def 
paint(self, p, *args, **kwargs):\n if self.picture == None:\n self.generatePicture()\n if pg.getConfigOption(\"antialias\") is True:\n print(\"setting antialias\")\n p.setRenderHint(True)\n self.picture.play(p)\n\n def boundingRect(self):\n xmn, xmx = float(np.min(self.spl()[:, 0])), float(np.max(self.spl()[:, 0]))\n ymn, ymx = float(np.min(self.spl()[:, 1])), float(np.max(self.spl()[:, 1]))\n\n px = py = 0.0\n pxPad = self.pixelPadding()\n if pxPad > 0:\n # determine length of pixel in local x, y directions\n px, py = self.pixelVectors()\n try:\n px = 0 if px is None else px.length()\n except OverflowError:\n px = 0\n try:\n py = 0 if py is None else py.length()\n except OverflowError:\n py = 0\n\n # return bounds expanded by pixel size\n px *= pxPad\n py *= pxPad\n\n return QtCore.QRectF(\n xmn - px, ymn - py, (2 * px) + xmx - xmn, (2 * py) + ymx - ymn\n )\n\n def dataBounds(self, *args, **kwds):\n # return pg.PlotCurveItem(pos=self.pos).dataBounds(*args, **kwds)\n return self.scatter.dataBounds(*args, **kwds)\n\n def pixelPadding(self):\n return self.scatter.pixelPadding()\n\n def getPath(self):\n crv_data = self.spl(n=1000)\n x, y = crv_data[:, 0], crv_data[:, 1]\n return pg.functions.arrayToQPath(x, y)\n\n def mouseShape(self):\n \"\"\"\n Return a QPainterPath representing the clickable shape of the curve\n\n \"\"\"\n # if self._mouseShape is None:\n view = self.getViewBox()\n if view is None:\n return QtGui.QPainterPath()\n stroker = QtGui.QPainterPathStroker()\n path = self.getPath()\n path = self.mapToItem(view, path)\n stroker.setWidth(10)\n mousePath = stroker.createStroke(path)\n self._mouseShape = self.mapFromItem(view, mousePath)\n return self._mouseShape\n\n\nclass SplineEditorTestWidget(QtGui.QWidget):\n \"\"\"Draw and edit a spline\"\"\"\n\n def __init__(self):\n super(SplineEditorTestWidget, self).__init__()\n\n self.spl_roi = SplineROI(pos=[(0, 0), (1, 1), (10, 10), (10, 20)])\n\n self.setGeometry(300, 300, 450, 450)\n self.setWindowTitle(\"Bezier Curves\")\n\n ###############################\n # Set up UI\n ###############################\n self.ui = Ui_Form()\n self.ui.setupUi(self)\n self.vb = self.ui.canvas.addViewBox()\n self.vb.addItem(self.spl_roi)\n self.vb.addItem(pg.GridItem())\n\n\nif __name__ == \"__main__\":\n qapp = QtWidgets.QApplication(sys.argv)\n se = SplineEditorTestWidget()\n se.show()\n qapp.exec_()\n", "sub_path": "pharynx_redox/gui/spline_editor.py", "file_name": "spline_editor.py", "file_ext": "py", "file_size_in_byte": 8854, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pyqtgraph.Point", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 41, "usage_type": "call"}, {"api_name": "scipy.interpolate.Akima1DInterpolator", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 63, "usage_type": "call"}, {"api_name": "pyqtgraph.GraphicsObject", "line_number": 67, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Signal", "line_number": 69, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 69, "usage_type": "name"}, {"api_name": "pyqtgraph.ScatterPlotItem", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 119, "usage_type": "call"}, 
{"api_name": "scipy.spatial.distance.cdist", "line_number": 119, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 125, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 138, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 138, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.keyboardModifiers", "line_number": 172, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 172, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 172, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 173, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 173, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPicture", "line_number": 179, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 179, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPainter", "line_number": 183, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 183, "usage_type": "name"}, {"api_name": "pyqtgraph.getConfigOption", "line_number": 189, "usage_type": "call"}, {"api_name": "pyqtgraph.functions.mkPen", "line_number": 190, "usage_type": "call"}, {"api_name": "pyqtgraph.functions", "line_number": 190, "usage_type": "attribute"}, {"api_name": "pyqtgraph.functions.arrayToQPath", "line_number": 191, "usage_type": "call"}, {"api_name": "pyqtgraph.functions", "line_number": 191, "usage_type": "attribute"}, {"api_name": "pyqtgraph.getConfigOption", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 206, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRectF", "line_number": 226, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 226, "usage_type": "name"}, {"api_name": "pyqtgraph.functions.arrayToQPath", "line_number": 240, "usage_type": "call"}, {"api_name": "pyqtgraph.functions", "line_number": 240, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QPainterPath", "line_number": 250, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 250, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPainterPathStroker", "line_number": 251, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 251, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QWidget", "line_number": 260, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui", "line_number": 260, "usage_type": "name"}, {"api_name": "pharynx_redox.gui.qt_py_files.spline_editor.Ui_Form", "line_number": 274, "usage_type": "call"}, {"api_name": "pyqtgraph.GridItem", "line_number": 278, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 282, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 282, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 282, "usage_type": "attribute"}]} +{"seq_id": "338085136", "text": "import cv2\nimport numpy as np \n\nimg=cv2.imread(\"1713054_ccn_IA2.PNG\")\nx,y,z=img.shape\n\nnx=x//7\nny=y//7\na=0\nb=0\ncounter=0\nwhile(a<=y):\n counter+=1\n while(b<=x and b>=0):\n color= [np.random.randint(0,255),np.random.randint(0,255),np.random.randint(0,255)]\n if counter%2!=0:\n cv2.rectangle(img, (b,a), (b+nx,a+ny),color,-1)\n b+=nx\n cv2.waitKey(500)\n cv2.imshow('Frame',img)\n else:\n cv2.rectangle(img, (b,a), 
(b+nx,a+ny),color,-1)\n b-=nx\n cv2.waitKey(500)\n cv2.imshow('Frame',img)\n if counter%2!=0:\n b-=nx\n else:\n b=0\n a+=ny\ncv2.imshow('Frame',img)\ncv2.waitKey(0)", "sub_path": "Assignment_3/Q2.py", "file_name": "Q2.py", "file_ext": "py", "file_size_in_byte": 697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "552827067", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 17 14:25:03 2020\n\n@author: florian\nedit: wedemannr\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntry:\n plt.close('all')\nexcept:\n pass\n\n\n### Settings\ndt = 2**(-6)\ndt_f = 2**(-11)\nn=int(1/dt_f)+1\nx = np.zeros(int(1/dt_f)+1)\nx_true = np.zeros(int(1/dt_f)+1)\nx0 = 1.0\na = 1.5\nb = 1.0\nt_f = np.arange(0,1+dt_f,dt_f)\nt = np.arange(0,1+dt,dt)\nx_n = 1\nxn = []\n\n###Calculations\n\n##Random Walk\ndW = np.random.normal(0,1,n)*np.sqrt(dt_f)\nW = np.cumsum(dW)\ncount = 0\n##Analytical solution\nfor i in range(len(t_f)):\n x_true[i] = x0 * np.exp((a-0.5*b**2)*t_f[i]+b*W[i]) #Analytical \n count += dW[i]\n if (t_f[i] in t):\n print(t_f[i])\n x_n = x_n + a*x_n*dt + b*x_n*count #Approximation\n xn.append(x_n)\n count = 0\n\n#%% Plot\n\n##Random Walk\nplt.figure()\nplt.plot(W)\nplt.title(\"Random Walk\")\n\n##Analytical and approximated solutions\nx1 = np.linspace(0,1,len(x_true))\nx2 = np.linspace(0,1,len(xn))\n\nplt.figure()\nplt.plot(x1,x_true,label=\"x_true\")\nplt.plot(x2,xn,\"r\",label=\"xn\")\nplt.legend()\n\n##Plot Random Numbers dW\nplt.figure()\nplt.plot(dW)\nplt.title(\"dW\")\nplt.show()", "sub_path": "random_walk.py", "file_name": "random_walk.py", "file_ext": "py", "file_size_in_byte": 1186, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.pyplot.close", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "390889042", "text": "import speech_recognition as sr\nfrom pprint import pprint\nfrom commands import main\n\nrecognizer = sr.Recognizer()\nmicrophone = sr.Microphone()\n\n\nIsLooping = True\nIsSpeech = True\n\nwhile IsLooping:\n with microphone as source:\n recognizer.adjust_for_ambient_noise(source)\n print('Say something...')\n audio = recognizer.listen(source)\n\n try:\n audio = str(recognizer.recognize_google(audio)).lower()\n\n main(audio)\n\n if 'goodbye' in audio:\n IsLooping = IsSpeech = False\n\n if IsSpeech:\n input('Speech: Press ENTER if you wanna say something...')\n\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n continue\n\n except sr.RequestError as e:\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n\nprint('Speech: Goodbye.')", "sub_path": "speech-recognition/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 894, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 5, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 6, "usage_type": "call"}, {"api_name": "commands.main", "line_number": 21, "usage_type": "call"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 29, "usage_type": "attribute"}, {"api_name": "speech_recognition.RequestError", "line_number": 33, "usage_type": "attribute"}]} +{"seq_id": "50687544", "text": "# Create your views here.\nfrom django.http import HttpResponse, Http404\nfrom django.template.loader import get_template\nfrom django import template\nfrom django.shortcuts import render_to_response\nfrom 
django.shortcuts import get_object_or_404, get_list_or_404\nfrom django.template.context import RequestContext\nfrom django.utils import translation\nimport abi.settings\nfrom portfolio.models import *\nfrom abinito.models import *\nfrom event.models import *\n\nRowNumber = 5\nportfolio_sufix = 'portfolio'\nfour = 3\n\ndef five_last_post(number = RowNumber):\n return PortfRecord.objects.all().order_by('-created')[:number]\n\ndef categorys_in_row(without_category, number_in_row = RowNumber - 1):\n categ = PortfCateg.objects.all().exclude(name_in_uri = without_category)\n res = []\n for row in xrange(0, len(categ), number_in_row):\n res += [categ[row:row + number_in_row]]\n return res\n\nfrom abinito.views import change_lang, set_lang\ndef portf_index(request):\n request = set_lang(request)\n return render_to_response('index_portfolio.html',\n {\n 'lastposts': five_last_post(),\n 'categorys': PortfCateg.objects.all(),\n 'backgr': MainMenuItem.objects.get(url = portfolio_sufix),\n 'mainmenu': MainMenuItem.objects.all().exclude(url = '/'),\n 'lang': change_lang(request),\n 'lasts': EventRecord.objects.all().order_by('id')[:four],\n }\n )\n\ndef portf_categ_list(request, categ):\n request = set_lang(request)\n return render_to_response('category_portfolio.html',\n {\n 'lastposts': five_last_post(),\n 'categ': PortfCateg.objects.get(name_in_uri = categ),\n 'categ_down': categorys_in_row(categ),\n 'backgr': MainMenuItem.objects.get(url = portfolio_sufix),\n 'mainmenu': MainMenuItem.objects.all().exclude(url = '/'),\n 'lang': change_lang(request),\n 'lasts': EventRecord.objects.all().order_by('id')[:four],\n }\n )\n\ndef portf_item(request, portf):\n request = set_lang(request)\n project = PortfRecord.objects.get(url = portf)\n return render_to_response('project_portfolio.html',\n {\n 'lastposts': five_last_post(),\n 'categ_down': categorys_in_row(project.category.name_in_uri),\n 'portfolio': project,\n 'backgr': MainMenuItem.objects.get(url = portfolio_sufix),\n 'mainmenu': MainMenuItem.objects.all().exclude(url = '/'),\n 'lang': change_lang(request),\n 'lasts': EventRecord.objects.all().order_by('id')[:four],\n }\n )\n", "sub_path": "abi/portfolio/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2607, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "abinito.views.set_lang", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 31, "usage_type": "call"}, {"api_name": "abinito.views.change_lang", "line_number": 37, "usage_type": "call"}, {"api_name": "abinito.views.set_lang", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 44, "usage_type": "call"}, {"api_name": "abinito.views.change_lang", "line_number": 51, "usage_type": "call"}, {"api_name": "abinito.views.set_lang", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 59, "usage_type": "call"}, {"api_name": "abinito.views.change_lang", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "355962505", "text": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.models import User\nfrom .models import Model, Report, Table, Table_Model, Model_Template_2, Choo, choo_template_Publisher, \\\n    choo_template_Country, choo_template_Device, Argenta, Argenta_Model, Belfius, Belfius_Model, cz_priceless_model, 
cz_priceless\n\n\nclass ChoiceInline(admin.StackedInline):\n model = Model\n extra = 0\n classes = ['collapse', 'suit-3column']\n list_display = ('CPA_Retargeting', 'Total_Conv_Retargeting', 'CPA_Prospecting', 'Total_Conv_Prospecting', 'published_date',)\n\n fieldsets = (\n ('', {'fields': (\n ('CPA_Retargeting', 'Total_Conv_Retargeting',),\n ('CPA_Prospecting', 'Total_Conv_Prospecting',),\n ('published_date',),\n ),\n }),\n )\n\nclass ChoiceInline2(admin.StackedInline):\n model = Model_Template_2\n extra = 0\n classes = ['collapse', 'suit-3column']\n list_display = ('CPA', 'Conversions', 'CPA_Prospecting', 'Total_Conv_Prospecting', 'published_date',)\n\n fieldsets = (\n ('', {'fields': (\n ('CPA', 'Conversions',),\n ('published_date',),\n ),\n }),\n )\n\nclass ChoiceInline_Table(admin.StackedInline):\n model = Table_Model\n extra = 0\n classes = ['collapse', 'suit-3column']\n list_display = ('impressions', 'click', 'media_budget', 'conversions', 'published_date', 'campaing_pause', 'active_cpm', 'pacing',)\n\n fieldsets = (\n ('', {'fields': (\n ('campaing_pause', 'target_cpa', 'active_cpm', 'pacing',),\n ('day_to_go', 'campaing_progress_days', 'day_of_the_week',),\n ('impressions', 'click',),\n ('media_budget', 'conversions',),\n ('impressions_fb', 'click_fb', 'conversions_fb',),\n ('published_date',),\n ),\n }),\n )\n\nclass ProjectAdmin(admin.ModelAdmin):\n list_display = ('report_title', 'slug', 'id', 'created_date', 'published_date', 'author', 'active_model', 'active_json',)\n search_fields = ('report_title', 'id',)\n inlines = [ChoiceInline, ChoiceInline_Table]\n list_filter = ('published_date', 'author',)\n\n fieldsets = (\n ('Report activation', {'fields': (\n ('active_json', 'active_model', 'active_model_2', 'active_model_3', 'active_model_4', 'active_model_5', 'active_table',),\n ('chart_js',),\n ),\n }),\n ('Report info', {'fields': (\n ('author', 'report_title',),\n ),\n }),\n ('Report upload', {'fields': (\n ('upload_report', 'logo_campaign',),\n ),\n }),\n ('Report data', {'fields': (\n ('created_date', 'published_date',),\n ),\n }),\n )\n\nclass ProjectAdminTable(admin.ModelAdmin):\n list_display = ('author', 'table_title', 'start_date', 'end_date', 'published_date', 'target_impressions', 'target_ecpm', 'target_click', 'target_ctr', 'target_media_budget', 'target_cpa', 'target_conversions', 'active_cpm', 'target_pacing', 'target_media_budget_pacing',)\n search_fields = ('report_title', 'id',)\n list_filter = ('published_date', 'author',)\n\n fieldsets = (\n ('Table info', {'fields': (\n ('author', 'table_title', ),\n ),\n }),\n ('Tabele data', {'fields': (\n ('start_date', 'end_date', 'published_date',),\n ),\n }),\n ('Table target', {'fields': (\n ('active_cpm', 'target_pacing', 'target_media_budget_pacing',),\n ('target_impressions', 'target_ecpm', 'target_click',),\n ('target_ctr', 'target_media_budget', 'target_cpa',),\n ('target_conversions',),\n ),\n }),\n ('Table target Facebook', {'fields': (\n ('target_impressions_fb', 'target_click_fb', 'target_conversions_fb', 'target_ctr_fb',),\n ),\n }),\n )\n\n\nclass ProjectAdminChoo(admin.StackedInline):\n model = choo_template_Publisher\n classes = ['collapse']\n extra = 0\n list_display = ('ft_clicks', 'ft_impressions', 'indipendent_clicks', 'indipendent_impressions', 'economist_clicks', 'economist_impressions', 'gq_clicks', 'gq_impressions', 'wired_clicks', 'wired_impressions', 'internazionale_clicks', 'internazionale_impressions', 'corriere_clicks', 'corriere_impressions', 'hp_clicks', 'hp_impressions', 'gazzetta_clicks', 
'gazzetta_impressions', 'monde_clicks', 'monde_impressions', 'offieciel_clicks', 'officiel_impressions', 'figaro_clicks', 'figaro_impressions', 'obs_clicks', 'obs_impressions', 'published_date', 'day_to_go', 'campaing_progress_days', 'day_of_the_week',)\n\n fieldsets = (\n ('', {'fields': (\n ('day_to_go', 'campaing_progress_days', 'day_of_the_week'),\n ),\n }),\n ('', {'fields': (\n ('ft_impressions', 'ft_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('indipendent_impressions', 'indipendent_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('economist_impressions', 'economist_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('gq_impressions', 'gq_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('wired_impressions', 'wired_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('internazionale_impressions', 'internazionale_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('corriere_impressions', 'corriere_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('hp_impressions', 'hp_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('gazzetta_impressions', 'gazzetta_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('monde_impressions', 'monde_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('officiel_impressions', 'offieciel_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('figaro_impressions', 'figaro_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('obs_impressions', 'obs_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('published_date', ),\n ),\n }),\n )\n\n\nclass ProjectAdminChooCountry(admin.StackedInline):\n model = choo_template_Country\n classes = ['collapse']\n extra = 0\n list_display = ('uk_clicks', 'uk_impressions', 'italy_clicks', 'italy_impressions', 'france_clicks', 'france_impressions', 'published_date',)\n\n fieldsets = (\n ('', {'fields': (\n ('day_to_go', 'campaing_progress_days', 'day_of_the_week'),\n ),\n }),\n ('', {'fields': (\n ('italy_impressions', 'italy_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('france_impressions','france_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('uk_impressions', 'uk_clicks',),\n ),\n }),\n ('', {'fields': (\n ('published_date',),\n ),\n }),\n )\n\n\nclass ProjectAdminChooDevice(admin.StackedInline):\n model = choo_template_Device\n classes = ['collapse']\n extra = 0\n list_display = ('desktop_clicks', 'desktop_impressions', 'mobile_clicks', 'mobile_impressions', 'published_date',)\n\n fieldsets = (\n ('', {'fields': (\n ('day_to_go', 'campaing_progress_days', 'day_of_the_week'),\n ),\n }),\n ('', {'fields': (\n ('desktop_impressions', 'desktop_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('mobile_impressions', 'mobile_clicks', ),\n ),\n }),\n ('', {'fields': (\n ('published_date',),\n ),\n }),\n )\n\nclass ChooAdmin(admin.ModelAdmin):\n inlines = [ProjectAdminChoo, ProjectAdminChooCountry, ProjectAdminChooDevice]\n list_display = ('choo_title', 'author', 'active_model', 'published_date', 'slug', 'active_table')\n\n fieldsets = (\n ('', {'fields': (\n ('author', 'choo_title', 'active_model', 'active_table', 'hide_graph',),\n ),\n }),\n ('', {'fields': (\n ('published_date', 'logo_campaign',),\n ),\n }),\n ('', {'fields': (\n ('start_date', 'end_date',),\n ),\n }),\n )\n\nclass ProjectArgenta(admin.StackedInline):\n model = Argenta_Model\n classes = ['collapse']\n extra = 0\n list_display = ('impressions', 'impressions_fb', 'click', 'click_fb', 'published_date',)\n\n fieldsets = (\n ('', {'fields': (\n ('day_to_go', 'campaing_progress_days', 'day_of_the_week'),\n ),\n }),\n ('Google AdWords', {'fields': (\n ('impressions', 'click', ),\n ),\n }),\n ('Facebook', {'fields': (\n ('impressions_fb', 
'click_fb', ),\n ),\n }),\n ('', {'fields': (\n ('published_date',),\n ),\n }),\n )\n\nclass ArgentaAdmin(admin.ModelAdmin):\n inlines = [ProjectArgenta]\n list_display = ('argenta_title', 'author', 'active_model', 'published_date', 'slug', 'active_table',)\n\n fieldsets = (\n ('', {'fields': (\n ('author', 'argenta_title', 'active_model', 'active_table', 'hide_graph',),\n ),\n }),\n ('', {'fields': (\n ('published_date', 'logo_campaign',),\n ),\n }),\n ('', {'fields': (\n ('start_date', 'end_date',),\n ),\n }),\n )\n\n\nclass ProjectBelfius(admin.StackedInline):\n model = Belfius_Model\n classes = ['collapse']\n extra = 0\n list_display = ('impressions', 'impressions_fb', 'impressions_vd', 'click', 'click_fb', 'click_vd', 'published_date',)\n\n fieldsets = (\n ('', {'fields': (\n ('day_to_go', 'campaing_progress_days', 'day_of_the_week'),\n ),\n }),\n ('Video (TOP) DBM', {'fields': (\n ('impressions', 'click', 'complete_view',),\n ),\n }),\n ('Facebook', {'fields': (\n ('impressions_fb', 'click_fb', 'complete_view_fb',),\n ),\n }),\n ('Display (MYNT) DBM', {'fields': (\n ('impressions_vd', 'click_vd', 'complete_view_vd',),\n ),\n }),\n ('', {'fields': (\n ('published_date',),\n ),\n }),\n )\n\nclass BelfiusAdmin(admin.ModelAdmin):\n inlines = [ProjectBelfius]\n list_display = ('belfius_title', 'author', 'active_model', 'published_date', 'slug', 'active_table', 'hide_video', 'hide_facebook', 'hide_display',)\n\n fieldsets = (\n ('', {'fields': (\n ('author', 'belfius_title', 'active_model', 'active_table', 'hide_graph', 'hide_video', 'hide_facebook', 'hide_display',),\n ),\n }),\n ('', {'fields': (\n ('published_date', 'logo_campaign',),\n ),\n }),\n ('', {'fields': (\n ('start_date', 'end_date',),\n ),\n }),\n )\n\n\nclass ProjectCZPriceless(admin.StackedInline):\n model = cz_priceless_model\n classes = ['collapse']\n extra = 0\n list_display = ('impressions', 'click',)\n\n fieldsets = (\n ('', {'fields': (\n ('day_to_go', 'campaing_progress_days', 'day_of_the_week'),\n ),\n }),\n ('', {'fields': (\n ('impressions', 'click',),\n ),\n }),\n ('', {'fields': (\n ('published_date',),\n ),\n }),\n )\n\nclass CZPricelessAddmin(admin.ModelAdmin):\n inlines = [ProjectCZPriceless]\n list_display = ('priceless_title', 'author', 'active_model', 'published_date', 'slug', 'active_table',)\n\n fieldsets = (\n ('', {'fields': (\n ('author', 'priceless_title', 'active_model', 'active_table', 'hide_graph',),\n ),\n }),\n ('', {'fields': (\n ('published_date', 'logo_campaign',),\n ),\n }),\n ('', {'fields': (\n ('start_date', 'end_date',),\n ),\n }),\n )\n\n\nadmin.site.register(Report, ProjectAdmin)\nadmin.site.register(Choo, ChooAdmin)\nadmin.site.register(Argenta, ArgentaAdmin)\nadmin.site.register(Belfius, BelfiusAdmin)\nadmin.site.register(cz_priceless, CZPricelessAddmin)", "sub_path": "chart/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 12143, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.contrib.admin.StackedInline", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 8, "usage_type": "name"}, {"api_name": "models.Model", "line_number": 9, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 23, "usage_type": "name"}, {"api_name": "models.Model_Template_2", "line_number": 24, "usage_type": "name"}, {"api_name": 
"django.contrib.admin.StackedInline", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 37, "usage_type": "name"}, {"api_name": "models.Table_Model", "line_number": 38, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 55, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 81, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 109, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 109, "usage_type": "name"}, {"api_name": "models.choo_template_Publisher", "line_number": 110, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 179, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 179, "usage_type": "name"}, {"api_name": "models.choo_template_Country", "line_number": 180, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 209, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 209, "usage_type": "name"}, {"api_name": "models.choo_template_Device", "line_number": 210, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 234, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 234, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 253, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 253, "usage_type": "name"}, {"api_name": "models.Argenta_Model", "line_number": 254, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 278, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 278, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 298, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 298, "usage_type": "name"}, {"api_name": "models.Belfius_Model", "line_number": 299, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 327, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 327, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 347, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 347, "usage_type": "name"}, {"api_name": "models.cz_priceless_model", "line_number": 348, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 368, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 368, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 388, "usage_type": "call"}, {"api_name": "models.Report", "line_number": 388, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 388, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 388, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 389, "usage_type": "call"}, {"api_name": "models.Choo", "line_number": 389, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 389, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 389, "usage_type": 
"name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 390, "usage_type": "call"}, {"api_name": "models.Argenta", "line_number": 390, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 390, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 390, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 391, "usage_type": "call"}, {"api_name": "models.Belfius", "line_number": 391, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 391, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 391, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 392, "usage_type": "call"}, {"api_name": "models.cz_priceless", "line_number": 392, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 392, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 392, "usage_type": "name"}]} +{"seq_id": "215114728", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 26 17:06:21 2016\n\n@author: c.senik\n\"\"\"\n\n########################################################\n# Load packages (need to be installed first if \n# not yet done - but is not difficult)\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt # pour plot functons\nimport scipy.sparse as sp # pour la construction de matrice creuse\nfrom scipy.sparse import linalg\n\n########################################################\n# Parametres du probleme\nu1l = -1.5\nu1r = 1.5 ## intervalle pour u1 [u1l, u1r]\nu2l = -1.5\nu2r = 1.5 ## intervalle pour u2 [u2l, u2r]\npu1 = 0.01 #pas pour u1 \npu2 = 0.01 #pas pour u2\nnl = 20 #nombre de lignes de niveauw désirées pour question I.1.1.\nU0 = [1.,0.] #condition initiale Questiont I.1.2.\npho=0.02 #le pas de descente\npho0=0.5 #le pas de descente initiale pour la méthode de gradient optimal\neps=0.001 #le seuil d'arrêt de la descente\ncompteur=0 #nombre d'itérés\nU0Test=[1.,1.] #condition initiale test\n###### Question I.1.1. Tracer les lignes de niveau ######\n\ndef J(u1,u2):\n return (u1-1.)**2 + 100.*(u1**2 - u2)**2\nU1 = np.arange(u1l, u1r, pu1)\nU2 = np.arange(u2l, u2r, pu2)\nU1, U2 = np.meshgrid(U1, U2)\nZ = J(U1, U2)\nplt.contourf(U1, U2, Z, nl)\nplt.colorbar()\nplt.show()\n\n\ndef gradJ(u,compteur):\n compteur=compteur+1\n return [2.*(u[0]-1.) 
+ 200.*(u[0]**2-u[1])*2.*u[0] , -200.*(u[0]**2-u[1]) ]\n# On ́ecrit la descente de gradient.\n# Noter que l’on ne fait qu’une seule ́evalutation de la fonction gradient.\ndef methodeDescente(pho,u0,eps,compteur):\n a=u0\n u1=[u0[0]]\n u2=[u0[1]]\n while (np.linalg.norm(gradJ(a,compteur))>eps):\n grad=gradJ(a,compteur)\n a[0]=a[0]-pho*grad[0]\n a[1]=a[1]-pho*grad[1]\n \n u1.append(a[0])\n u2.append(a[1])\n \n## On va tracer les itérés de l'abcisse et de l'ordonnée de u respectivement \n lg=len(u1)\n x=np.linspace(0,lg-1,lg)\n p1=plt.plot(x,u1,marker='o')\n p2=plt.plot(x,u2,marker='v')\n plt.title(\"Méthodes du pas de gradient\") # Problemes avec accents (plot_directive) !\n plt.legend([p1, p2], [\"Abscisse\", \"Ordonnée\"])\n plt.show()\n \n## exemple \nmethodeDescente(pho,U0,eps,0) \n \n \n###### test sur la fonction norme L2 au carré, de minimum (0,0) ######\ndef gradTest(u,compteur):\n return [2*u[0],2*u[1]]\ndef methodeDescenteTest(pho,u0,eps,compteur):\n a=u0\n u1=[u0[0]]\n u2=[u0[1]]\n while (np.linalg.norm(gradTest(a,compteur))>eps):\n grad=gradTest(a,compteur)\n a[0]=a[0]-pho*grad[0]\n a[1]=a[1]-pho*grad[1]\n \n u1.append(a[0])\n u2.append(a[1])\n lg=len(u1)\n x=np.linspace(0,lg-1,lg)\n p1=plt.plot(x,u1,marker='o')\n p2=plt.plot(x,u2,marker='v')\n plt.title(\"Méthodes du pas de gradient\") # Problemes avec accents (plot_directive) !\n plt.legend([p1, p2], [\"Abscisse\", \"Ordonnée\"])\n plt.show()\n \n## exemple sur le test, enlevez les ## pour essayer, et jouer avec les paramètres pho, donnée initiale teste et seuil epsilon pour constater que cela marche!\n## methodeDescenteTest(pho,U0Test,eps,0) \n \n###### Question I.2.1 #####\n \ndef methodeDescenteOpt(pho0,u0,eps,compteur,lamb):\n pho=pho0\n a=u0\n u1=[u0[0]]\n u2=[u0[1]]\n while (np.linalg.norm(gradJ(a,compteur))>eps):\n grad=gradJ(a,compteur)\n a[0]=a[0]-pho*grad[0]\n a[1]=a[1]-pho*grad[1]\n \n u1.append(a[0])\n u2.append(a[1]) ", "sub_path": "Method2.py", "file_name": "Method2.py", "file_ext": "py", "file_size_in_byte": 3444, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.arange", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.legend", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 107, "usage_type": "attribute"}]} +{"seq_id": "125245774", "text": "import numpy as np\nimport os\nfrom time import time\nfrom amset.utils.constants import comp_to_dirname\nfrom amset.utils.pymatgen_loader_for_bzt2 import PymatgenLoader\nfrom matminer import PlotlyFig\nfrom BoltzTraP2 import sphere, fite\nfrom pymatgen.io.vasp import Vasprun\nfrom amset.utils.band_interpolation import get_energy_args, interpolate_bs\nfrom amset.utils.band_structure import get_bindex_bspin\n\n\"\"\"\nThis script is to compare the energy, velocity and effective mass calculated \nfrom the band structures interpolated via BoltzTraP1 vs. BoltzTraP2 to check\ntheir consistency. 
If KPOINTS file is fed to the Vasprun.get_bandstructure() \nmethod, one can compare the band structures in line-mode which is more visually \nappealing.\n\"\"\"\n\ndef retrieve_bs_boltztrap1(coeff_file, bs, ibands, matrix=None):\n interp_params = get_energy_args(coeff_file, ibands)\n pf = PlotlyFig(filename='Energy-bt1')\n plot_data =[]\n v_data = []\n mass_data = []\n trace_names = []\n Eref = 0.0\n vels = {iband: [] for iband in ibands}\n sym_line_kpoints = []\n\n for i, iband in enumerate(ibands):\n sym_line_kpoints = [k.frac_coords for k in bs.kpoints]\n\n en, vel, masses = interpolate_bs(sym_line_kpoints, interp_params, iband=i,\n method=\"boltztrap1\", scissor=0.0, matrix=matrix, n_jobs=-1)\n vel = np.linalg.norm(vel, axis=1)\n masses = [mass.trace()/3.0 for mass in masses]\n if i==0:\n Eref = max(en)\n en = [E - Eref for E in en]\n plot_data.append((list(range(len(en))), en))\n v_data.append((en, vel))\n mass_data.append((en, masses))\n trace_names.append('band {}'.format(iband))\n\n\n pf.xy(plot_data, names=[n for n in trace_names], labels=[sym_line_kpoints])\n pf2 = PlotlyFig(filename='Velocity-bt1')\n pf2.xy(v_data, names=[n for n in trace_names])\n pf3 = PlotlyFig(filename='mass-bt1')\n pf3.xy(mass_data, names=[n for n in trace_names])\n\n\ndef retrieve_bs_boltztrap2(vrun, bs, ibands, matrix=None):\n pf = PlotlyFig(filename='Energy-bt2')\n sym_line_kpoints = [k.frac_coords for k in bs.kpoints]\n bz_data = PymatgenLoader(vrun)\n equivalences = sphere.get_equivalences(atoms=bz_data.atoms, nkpt=len(bz_data.kpoints) * 5, magmom=None)\n lattvec = bz_data.get_lattvec()\n coeffs = fite.fitde3D(bz_data, equivalences)\n kpts = np.array(sym_line_kpoints)\n interp_params = (equivalences, lattvec, coeffs)\n plot_data = []\n v_data = []\n names = []\n mass_data = []\n eref = 0.0\n for ith, iband in enumerate(ibands):\n en, vel, masses = interpolate_bs(kpts, interp_params, iband=iband,\n method=\"boltztrap2\", matrix=matrix)\n # method = \"boltztrap2\", matrix = lattvec * 0.529177)\n if ith==0:\n eref = np.max(en)\n en -= eref\n plot_data.append((list(range(len(en))), en ))\n v_data.append((en, np.linalg.norm(vel, axis=1)))\n mass_data.append((en, [mass.trace()/3.0 for mass in masses]))\n names.append('band {}'.format(iband+1))\n pf.xy(plot_data, names=[n for n in names])\n pf2 = PlotlyFig(filename='Velocity-bt2')\n pf2.xy(v_data, names=[n for n in names])\n pf3 = PlotlyFig(filename='mass-bt2')\n pf3.xy(mass_data, names=[n for n in names])\n\nif __name__ == \"__main__\":\n # user inputs\n COMPOUND = 'GaAs' # You can try: GaAs, Si, PbTe, InP, AlCuS2, In2O3\n DIR = os.path.dirname(__file__)\n test_dir = os.path.join(DIR, '../test_files')\n vruns = {c: Vasprun(os.path.join(test_dir, comp_to_dirname[c],\n 'vasprun.xml')) for c in comp_to_dirname}\n coeff_files = {c: os.path.join(test_dir, comp_to_dirname[c],\n 'fort.123') for c in comp_to_dirname}\n\n vrun = vruns[COMPOUND]\n cube_path = coeff_files[COMPOUND]\n\n # bs = vrun.get_band_structure(\n # kpoints_filename=os.path.join(test_dir, kpoints_path), line_mode=True)\n bs = vrun.get_band_structure()\n\n rec_matrix = vrun.final_structure.lattice.reciprocal_lattice.matrix\n dir_matrix = vrun.final_structure.lattice.matrix\n\n st = vrun.final_structure\n\n vbm_idx, _ = get_bindex_bspin(bs.get_vbm(), is_cbm=False)\n cbm_idx, _ = get_bindex_bspin(bs.get_cbm(), is_cbm=True)\n ibands = [vbm_idx+1, cbm_idx+1]\n\n coeff_file = os.path.join(test_dir, cube_path)\n start_time = time()\n retrieve_bs_boltztrap1(coeff_file=coeff_file, bs=bs, ibands=ibands, 
matrix=dir_matrix)\n\n retrieve_bs_boltztrap2(vrun, bs=bs, ibands=ibands, matrix=dir_matrix)\n", "sub_path": "scripts/retrieve_band_structure.py", "file_name": "retrieve_band_structure.py", "file_ext": "py", "file_size_in_byte": 4559, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "amset.utils.band_interpolation.get_energy_args", "line_number": 21, "usage_type": "call"}, {"api_name": "matminer.PlotlyFig", "line_number": 22, "usage_type": "call"}, {"api_name": "amset.utils.band_interpolation.interpolate_bs", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 36, "usage_type": "attribute"}, {"api_name": "matminer.PlotlyFig", "line_number": 48, "usage_type": "call"}, {"api_name": "matminer.PlotlyFig", "line_number": 50, "usage_type": "call"}, {"api_name": "matminer.PlotlyFig", "line_number": 55, "usage_type": "call"}, {"api_name": "amset.utils.pymatgen_loader_for_bzt2.PymatgenLoader", "line_number": 57, "usage_type": "call"}, {"api_name": "BoltzTraP2.sphere.get_equivalences", "line_number": 58, "usage_type": "call"}, {"api_name": "BoltzTraP2.sphere", "line_number": 58, "usage_type": "name"}, {"api_name": "BoltzTraP2.fite.fitde3D", "line_number": 60, "usage_type": "call"}, {"api_name": "BoltzTraP2.fite", "line_number": 60, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "amset.utils.band_interpolation.interpolate_bs", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 76, "usage_type": "attribute"}, {"api_name": "matminer.PlotlyFig", "line_number": 80, "usage_type": "call"}, {"api_name": "matminer.PlotlyFig", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pymatgen.io.vasp.Vasprun", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "amset.utils.constants.comp_to_dirname", "line_number": 90, "usage_type": "name"}, {"api_name": "amset.utils.constants.comp_to_dirname", "line_number": 91, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "amset.utils.constants.comp_to_dirname", "line_number": 92, "usage_type": "name"}, {"api_name": "amset.utils.constants.comp_to_dirname", "line_number": 93, "usage_type": "name"}, {"api_name": "amset.utils.band_structure.get_bindex_bspin", "line_number": 107, "usage_type": "call"}, {"api_name": "amset.utils.band_structure.get_bindex_bspin", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "223227115", "text": "\"\"\"CPU functionality.\"\"\"\n\nimport sys\nfrom datetime 
import datetime\nfrom msvcrt import kbhit, getch\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n # Initialize Register\n self.reg = [0] * 8\n self.reg[7] = 0xF4\n\n # Initialize Memory\n self.ram = [0] * 256\n\n # Initialize internal registers\n self.pc = 0\n self.ir = 0\n self.mar = 0\n self.mdr = 0\n self.fl = 0\n\n # Initialize operation values\n self.operand_a = 0\n self.operand_b = 0\n\n # Initialize halt as false. CPU does not start halted\n\n self.halt = False\n\n # Initialize pc_override for when pc is manually set by function\n\n self.pc_override = False\n\n # Intitialize interrupt \n\n self.interrupt = True\n\n # Initialize branch_table\n self.branch_table = {}\n self.branch_table[0b10000010] = self.LDI\n self.branch_table[0b01000111] = self.PRN\n self.branch_table[0b00000001] = self.HLT\n self.branch_table[0b01000101] = self.PUSH\n self.branch_table[0b01000110] = self.POP\n self.branch_table[0b01010000] = self.CALL\n self.branch_table[0b00010001] = self.RET\n self.branch_table[0b01010010] = self.INT\n self.branch_table[0b00010011] = self.IRET\n self.branch_table[0b01010101] = self.JEQ\n self.branch_table[0b01011010] = self.JGE\n self.branch_table[0b01010111] = self.JGT\n self.branch_table[0b01011001] = self.JLE\n self.branch_table[0b01011000] = self.JLT\n self.branch_table[0b01010100] = self.JMP\n self.branch_table[0b01010110] = self.JNE\n self.branch_table[0b10000011] = self.LD\n self.branch_table[0b00000000] = self.NOP\n self.branch_table[0b01001000] = self.PRA\n self.branch_table[0b10000100] = self.ST\n\n\n \n \n # Initialize alu_table\n self.alu_table = {}\n self.alu_table[0b10100010] = \"MUL\"\n self.alu_table[0b10100000] = \"ADD\"\n self.alu_table[0b10101000] = \"AND\"\n self.alu_table[0b10100111] = \"CMP\"\n self.alu_table[0b01100110] = \"DEC\"\n self.alu_table[0b10100011] = \"DIV\"\n self.alu_table[0b01100101] = \"INC\"\n self.alu_table[0b10100100] = \"MOD\"\n self.alu_table[0b01101001] = \"NOT\"\n self.alu_table[0b10101010] = \"OR\"\n self.alu_table[0b10101100] = \"SHL\"\n self.alu_table[0b10101101] = \"SHR\"\n self.alu_table[0b10100001] = \"SUB\"\n self.alu_table[0b10101011] = \"XOR\"\n\n # Initialize Interupt Table\n self.interrupt_table = {}\n self.interrupt_table[0b00000001] = 0xF8\n self.interrupt_table[0b00000010] = 0xF9\n\n\n def load(self, program):\n \"\"\"Load a program into memory.\"\"\"\n\n address = 0\n\n for instruction in program:\n self.ram_write(address, instruction)\n address += 1\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n\n if op == \"ADD\":\n self.reg[reg_a] += self.reg[reg_b]\n elif op == \"SUB\": \n self.reg[reg_a] -= self.reg[reg_b]\n elif op == \"MUL\":\n self.reg[reg_a] *= self.reg[reg_b]\n elif op == \"AND\":\n self.reg[reg_a] &= self.reg[reg_b]\n elif op == \"CMP\":\n if self.reg[reg_a] == self.reg[reg_b]:\n self.fl = 0b00000001\n elif self.reg[reg_a] > self.reg[reg_b]:\n self.fl = 0b00000010\n else:\n self.fl = 0b00000100\n elif op == \"DEC\":\n self.reg[reg_a] -= 1\n elif op == \"DIV\":\n if self.reg[reg_b] == 0:\n print(\"Error: Cannot divide by zero!\")\n self.halt = True\n else:\n self.reg[reg_a] /= self.reg[reg_b]\n elif op == \"INC\":\n self.reg[reg_a] += 1\n elif op == \"MOD\":\n if self.reg[reg_b] == 0:\n print(\"Error: Cannot divide by zero!\")\n self.halt = True\n else:\n self.reg[reg_a] %= self.reg[reg_b]\n elif op == \"NOT\":\n self.reg[reg_a] = ~self.reg[reg_a] \n elif op == \"OR\":\n self.reg[reg_a] |= self.reg[reg_b]\n elif op == \"SHL\":\n 
self.reg[reg_a] <<= self.reg[reg_b]\n elif op == \"SHR\":\n self.reg[reg_a] >>= self.reg[reg_b]\n elif op == \"XOR\":\n self.reg[reg_a] ^= self.reg[reg_b] \n else:\n raise Exception(\"Unsupported ALU operation\")\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n #self.fl,\n #self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n\n print()\n\n def ram_read(self, address):\n \"\"\"\n Function that reads from memory. Takes in the address to read.\n Returns the value stored at that address in the RAM.\n \"\"\"\n self.mar = address\n self.mdr = self.ram[self.mar]\n\n return self.mdr\n\n def ram_write(self, address, value):\n \"\"\"\n Function that writes to memory. Takes in the value to write,\n the address to write to. Saves value at the given address.\n \"\"\"\n self.mar = address\n self.ram[self.mar] = value\n self.mdr = self.ram[self.mar]\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n time_delta = 0\n start_time = datetime.now()\n while not self.halt: \n #self.trace() \n # print(datetime.now()) \n self.ir = self.ram_read(self.pc)\n self.operand_a = self.ram_read(self.pc + 1)\n self.operand_b = self.ram_read(self.pc + 2) \n\n # reset pc_override to check if PC needs to be adjusted \n self.pc_override = False\n\n # Determine if alu operation or branch_table operation\n bit_5 = (self.ir & 2 ** 5) >> 5\n\n if self.interrupt:\n current_time = datetime.now()\n time_delta = current_time - start_time\n if time_delta.total_seconds() >= 1:\n start_time = datetime.now()\n self.reg[6] |= 0b00000001\n if kbhit():\n self.ram_write(0xF4, ord(getch().decode('ascii')))\n self.reg[6] |= 0b00000010\n masked_interrupts = self.reg[5] & self.reg[6]\n for i in range(8): \n interrupt_happened = ((masked_interrupts >> i) & 1) == 1\n if interrupt_happened:\n self.ir = 0b01010010\n \n \n if bit_5:\n op = self.alu_table[self.ir]\n self.alu(op, self.operand_a, self.operand_b)\n \n if not bit_5:\n self.branch_table[self.ir]()\n\n if not self.pc_override:\n bit_6 = (self.ir & 2 ** 6) >> 6\n bit_7 = (self.ir & 2 ** 7) >> 7\n\n if bit_6: \n self.pc += 2\n if bit_7:\n self.pc += 3\n sys.exit()\n \n\n def LDI(self):\n \"\"\"\n Set register to this value\n \"\"\"\n self.reg[self.operand_a] = self.operand_b\n\n def PRN(self):\n \"\"\"\n Prints numeric value stored at register address\n \"\"\"\n print(self.reg[self.operand_a])\n\n def HLT(self):\n \"\"\"\n Sets halt value to true\n \"\"\"\n self.halt = True\n\n def PUSH(self, value=None):\n \"\"\"\n Pushes value at given register on to computer stack\n \"\"\"\n self.reg[7] -= 1\n if value is None:\n value = self.reg[self.operand_a]\n self.ram_write(self.reg[7], value)\n \n def POP(self, register=True):\n \"\"\"\n Pops value at current stack pointer off the stack \n and stores it at the given register\n \"\"\"\n value = self.ram_read(self.reg[7])\n if register:\n self.reg[self.operand_a] = value\n else:\n return value\n self.reg[7] += 1\n\n def CALL(self):\n \"\"\"\n Calls a subroutine (function) stored at the address in the register.\n \n The address of the instruction directly after CALL is pushed onto the stack. 
\n The PC is set to the address stored in the given register.\n \"\"\"\n self.pc_override = True\n self.PUSH(self.pc + 2)\n self.pc = self.reg[self.operand_a]\n\n def RET(self):\n \"\"\"\n Returns from subroutine.\n Pop the value from the top of the stack and store it in the PC.\n \"\"\"\n self.pc = self.POP(register=False)\n\n def INT(self):\n \"\"\"\n Issue the interrupt number stored in the given register.\n\n This will set the _n_th bit in the IS register to the value in the given register.\n \"\"\"\n self.pc_override = True\n # Disable further interrupts.\n self.interrupt = False\n # Clear the bit in the IS register.\n im_reg = self.reg[5]\n self.reg[6] = 0\n # The PC register is pushed on the stack.\n self.PUSH(self.pc)\n # The FL register is pushed on the stack.\n self.PUSH(self.fl)\n # Registers R0-R6 are pushed on the stack in that order.\n for i in range(7):\n self.PUSH(self.reg[i])\n # Set the PC is set to the handler address. \n self.pc = self.ram_read(self.interrupt_table[im_reg])\n\n\n def IRET(self):\n \"\"\"\n Return from an interrupt handler.\n\n The following steps are executed:\n\n Registers R6-R0 are popped off the stack in that order.\n The FL register is popped off the stack.\n The return address is popped off the stack and stored in PC.\n Interrupts are re-enabled\n \"\"\"\n self.pc_override = True\n # Registers R6-R0 are popped off the stack in that order.\n for i in range(7):\n self.reg[6 - i] = self.POP(register=False)\n # The FL register is popped off the stack.\n self.fl = self.POP(register=False)\n # The return address is popped off the stack and stored in PC.\n self.pc = self.POP(register=False)\n # Interrupts are re-enabled\n self.interrupt = True\n\n\n def JEQ(self):\n \"\"\"\n If equal flag is set (true), jump to the address stored in the given register.\n \"\"\"\n bit_0 = (self.fl & 2 ** 0) >> 0\n if bit_0:\n self.pc_override = True\n self.pc = self.reg[self.operand_a]\n \n def JGE(self):\n \"\"\"\n If greater-than flag or equal flag is set (true), \n jump to the address stored in the given register.\n \"\"\"\n bit_0 = (self.fl & 2 ** 0) >> 0\n bit_1 = (self.fl & 2 ** 1) >> 1\n\n if bit_0 or bit_1:\n self.pc_override = True\n self.pc = self.reg[self.operand_a]\n\n def JGT(self):\n \"\"\"\n If greater-than flag is set (true), jump to the address stored in the given register.\n \"\"\" \n bit_1 = (self.fl & 2 ** 1) >> 1\n\n if bit_1:\n self.pc_override = True\n self.pc = self.reg[self.operand_a]\n\n def JLE(self):\n \"\"\"\n If less-than flag or equal flag is set (true), jump to the address stored in the given register.\n \"\"\"\n bit_0 = (self.fl & 2 ** 0) >> 0\n bit_2 = (self.fl & 2 ** 2) >> 2\n\n if bit_0 or bit_2:\n self.pc_override = True\n self.pc = self.reg[self.operand_a]\n \n def JLT(self):\n \"\"\"\n If less-than flag is set (true), jump to the address stored in the given register.\n \"\"\"\n bit_2 = (self.fl & 2 ** 2) >> 2\n\n if bit_2:\n self.pc_override = True\n self.pc = self.reg[self.operand_a]\n\n def JMP(self):\n \"\"\"\n Jump to the address stored in the given register.\n \"\"\"\n self.pc_override = True\n self.pc = self.reg[self.operand_a]\n\n def JNE(self):\n \"\"\"\n If E flag is clear (false, 0), jump to the address stored in the given register.\n \"\"\"\n bit_0 = (self.fl & 2 ** 0) >> 0\n \n if not bit_0:\n self.pc_override = True\n self.pc = self.reg[self.operand_a]\n\n\n def LD(self):\n \"\"\"\n Loads registerA with the value at the memory address stored in registerB.\n\n This opcode reads from memory.\n \"\"\"\n 
self.reg[self.operand_a] = self.ram_read(self.reg[self.operand_b])\n\n def NOP(self):\n \"\"\"\n No operation. Do nothing for this instruction.\n \"\"\"\n pass\n\n def PRA(self):\n \"\"\"\n Print alpha character value stored in the given register.\n \"\"\"\n print(chr(self.reg[self.operand_a]))\n\n def ST(self):\n \"\"\"\n Store value in registerB in the address stored in registerA.\n This opcode writes to memory.\n \"\"\"\n self.ram_write(\n self.reg[self.operand_a],\n self.reg[self.operand_b]\n )\n\n\n\n\n", "sub_path": "ls8/cpu.py", "file_name": "cpu.py", "file_ext": "py", "file_size_in_byte": 13401, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "datetime.datetime.now", "line_number": 189, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 189, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 204, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 204, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 207, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 207, "usage_type": "name"}, {"api_name": "msvcrt.kbhit", "line_number": 209, "usage_type": "call"}, {"api_name": "msvcrt.getch", "line_number": 210, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 234, "usage_type": "call"}]} +{"seq_id": "280432552", "text": "import math\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport pdb\nimport scipy \nfrom os import listdir\nimport os\nimport os.path\nimport glob\nfrom os.path import isfile, join\nimport subprocess\nimport re\n\nplt.style.use('seaborn-white')\nplt.rcParams['font.family'] = 'serif'\nplt.rcParams['font.serif'] = 'Times New Roman'\nplt.rcParams['font.monospace'] = 'Ubuntu Mono'\nplt.rcParams['font.size'] = 16\nplt.rcParams['font.weight'] = 'bold'\nplt.rcParams['axes.labelsize'] = 17\nplt.rcParams['axes.labelweight'] = 'normal'\nplt.rcParams['xtick.labelsize'] = 14\nplt.rcParams['ytick.labelsize'] = 14\nplt.rcParams['legend.fontsize'] = 16\n# plt.rcParams['axes.titlesize'] = 10\n# plt.rcParams['legend.fontweight'] = 'normal'\n# plt.rcParams['figure.titlesize'] = 12\n\nmy_dpi = 200\nfig_width = 9*200\nfig_height = 6*200\n\n# exp_folders = ['./experiments-Det-WAE-CELEB/15e26667130f427eb2ff4d402a85d3dc/',\n# './experiments-Det-WGANGP-CELEB/53ec8ff3559f40ddb8db6f9d6fd7aeea/',\n# './experiments-Det-PDWGAN-CELEB/0539390c52ce4a518385e957cd269641/']\n\n# exp_folders = ['./experiments-Det-WAE-CIFAR10/9511cff568144e19a8bd68a9d7f6715a/',\n# './experiments-Det-WGANGP-CIFAR10/e5742de883f6452e9a3a4960429ab391/',\n# './experiments-Det-PDWGAN-CIFAR10/4d08f5269c12452495a2e0d2089d22e5/']\n\n# exp_folders = ['./experiments-Det-WAE-FLOWERS/b1806193edef4ef396264cdf8ca0d432/',\n# './experiments-Det-WGANGP-FLOWERS/1909f4b9c7084c1ba04eebed71b49cd1/',\n# # './experiments-Det-PDWGAN-FLOWERS/c411520907d849c5a310a2962e0eff15/']\n# './experiments-Det-PDWGAN-FLOWERS/005eb4cef7e949e08ce48acfc3f46be3/']\n\n# exp_folders = ['./OlderExperiments/fourthSet/experiments-WAE-CIFAR10/60322e3ddc1b4c2d99d53b933e95543b/',\n# './OlderExperiments/fourthSet/experiments-WGANGP-CIFAR10/dfc7daabc5ed44abb88228e7d76b1083/',\n# './OlderExperiments/fourthSet/experiments-PDWGAN-CIFAR10/9de19b3af7c5444bb8dc1fea7b387820/']\n\n# exp_folders = ['./experiments-Det-WAE-CUB/c2733e89919f4baf8aa29064187ee428/',]\n\n# exp_folders = 
['./OlderExperiments/fifthSet/experiments128-WAEVanilla-CIFAR10/107c24e9367f490fa5ffa2f2a49efb41/',\n# './experimentsLast-WGANGPCannon-CIFAR10/41f0e373518647429e5df8d6d690c819/',\n# './experimentsLast-PDWGANCannon-CIFAR10/aa525987da2d4feea0e7964b91530240/']\n\n# exp_folders = ['./experimentsLast-WGANGPCannon-CIFAR10/41f0e373518647429e5df8d6d690c819/',\n# './experimentsLast-PDWGANCannon-CIFAR10/aa525987da2d4feea0e7964b91530240/']\n\nexp_folders = ['./EEEexperimentsLast-WGANGPCannon-FLOWERS/44cc2a6852f94b58886539dd3db7c5ac/',\n './EEEexperimentsLast-PDWGANCannon-FLOWERS/6f403789c7df4571bc52dc28238be0bb/']\n\n# exp_folders = ['./EEEexperimentsLast-WGANGPCannon-CUB/b1cd63a4e204451eab898eceb83fc3f4/',\n# './EEEexperimentsLast-PDWGANCannon-CUB/ce3b05f0d1b44c81b6d01319f101c46f/']\n\nsave_index = -1\n# mode = 'LeastEpochs'\n# mode = 'Full'\nmode = 'EpochInterval'\nmin_epoch = 200\nmax_epoch = 1500\n# min_epoch = 0\n# max_epoch = 2300\nnames = ['Epochs', 'FID', 'Model Inc Mean', 'Model Inc Std', 'Real Inc Mean', 'Real Inc Std']\nlist_of_np_numerics = []\nfor exp_folder in exp_folders:\n file_path = exp_folder+'/test_inception_stats.txt'\n\n if os.path.exists(file_path): \n with open(file_path, \"r\") as text_file: data_lines = text_file.readlines()\n all_numeric_lines = []\n if len(data_lines)==1: \n data_lines_corrected = data_lines[0].split('Epoch')[1:]\n data_lines = ['Epoch'+e for e in data_lines_corrected]\n\n for data_line in data_lines:\n numerics = []\n for e in re.split(': | |\\n|',data_line):\n try: \n float_e = float(e)\n numerics.append(float_e)\n except: \n pass\n all_numeric_lines.append(numerics)\n np_all_numeric_lines = np.asarray(all_numeric_lines)\n list_of_np_numerics.append(np_all_numeric_lines)\n\n# identifiers = ['WAE', 'WGAN-GP', 'PD-WGAN', 'Train. Data']\n# colors = ['b', 'g', 'r', 'k', 'y']\n# markers = ['d', 'v','h', 'o', 's']\n\nidentifiers = ['WGAN-GP', 'PD-WGAN', 'Train. 
Data']\ncolors = ['g', 'r', 'k', 'y']\nmarkers = ['v', 'h', 'o', 's']\n\nif mode == 'LeastEpochs': \n least_max_epoch = 100000000000000\n for i, np_all_numeric_lines in enumerate(list_of_np_numerics):\n curr_max_epoch = np.max(np_all_numeric_lines[1:,0])\n if curr_max_epoch < least_max_epoch: least_max_epoch = curr_max_epoch\n\n# plt.figure(figsize=(fig_width/my_dpi, fig_height/my_dpi), dpi=my_dpi)\nplt.figure(figsize=(fig_width/(2*my_dpi), fig_height/my_dpi), dpi=my_dpi)\nplt.cla()\ny_label = 'Frechet Inception Distance (FID)'\nx_label = 'Training Epochs'\nmin_y_val = 100000000000000\nmax_y_val = -100000000000000\nfor i, np_all_numeric_lines in enumerate(list_of_np_numerics):\n if mode == 'LeastEpochs':\n mask = np_all_numeric_lines[1:,0]<=least_max_epoch\n x_vals = np_all_numeric_lines[1:,0][mask]\n y_vals = np_all_numeric_lines[1:,1][mask]\n elif mode == 'EpochInterval': \n mask_upper = np_all_numeric_lines[1:,0]<=max_epoch\n mask_lower = np_all_numeric_lines[1:,0]>=min_epoch \n mask = mask_upper*mask_lower\n x_vals = np_all_numeric_lines[1:,0][mask]\n y_vals = np_all_numeric_lines[1:,1][mask]\n else:\n x_vals = np_all_numeric_lines[1:,0]\n y_vals = np_all_numeric_lines[1:,1]\n if np.min(y_vals)max_y_val: max_y_val = np.max(y_vals)\n plt.plot(x_vals, y_vals, linewidth=2, linestyle='-', color=colors[i], label=identifiers[i], marker=markers[i], markersize=10)\n\ny_range = (max_y_val-min_y_val)\nplt.ylabel(y_label, fontsize=16)\nplt.xlabel(x_label, fontsize=16)\nplt.grid()\nplt.legend(frameon=True)\nplt.ylim((min_y_val-0.1*y_range, max_y_val+0.1*y_range ))\nplt.xlim((0,1000))\nplt.savefig(exp_folders[save_index]+'Visualization/fid_comparison.png', bbox_inches='tight', format='png', dpi=my_dpi, transparent=False)\nprint('Saving to path: ', exp_folders[save_index]+'Visualization/fid_comparison.png')\n\n\n# plt.figure(figsize=(fig_width/my_dpi, fig_height/my_dpi), dpi=my_dpi)\nplt.figure(figsize=(fig_width/(2*my_dpi), fig_height/my_dpi), dpi=my_dpi)\nplt.cla()\ny_label = 'Inception Score (IS)'\nx_label = 'Training Epochs'\nmin_y_val = 100000000000000\nmax_y_val = -100000000000000\nfor i, np_all_numeric_lines in enumerate(list_of_np_numerics):\n if mode == 'LeastEpochs':\n mask = np_all_numeric_lines[1:,0]<=least_max_epoch\n x_vals = np_all_numeric_lines[1:,0][mask]\n y_vals = np_all_numeric_lines[1:,2][mask]\n elif mode == 'EpochInterval': \n mask_upper = np_all_numeric_lines[1:,0]<=max_epoch\n mask_lower = np_all_numeric_lines[1:,0]>=min_epoch \n mask = mask_upper*mask_lower\n x_vals = np_all_numeric_lines[1:,0][mask]\n y_vals = np_all_numeric_lines[1:,2][mask]\n else:\n x_vals = np_all_numeric_lines[1:,0]\n y_vals = np_all_numeric_lines[1:,2]\n if np.min(y_vals)max_y_val: max_y_val = np.max(y_vals)\n plt.plot(x_vals, y_vals, linewidth=2, linestyle='-', color=colors[i], label=identifiers[i], marker=markers[i], markersize=10)\n\n# i=3\n# np_all_numeric_lines=list_of_np_numerics[0]\n# if mode == 'LeastEpochs':\n# mask = np_all_numeric_lines[1:,0]<=least_max_epoch\n# x_vals = np_all_numeric_lines[1:,0][mask]\n# y_vals = np_all_numeric_lines[1:,4][mask]\n# else:\n# x_vals = np_all_numeric_lines[1:,0]\n# y_vals = np_all_numeric_lines[1:,4]\n# if np.min(y_vals)max_y_val: max_y_val = np.max(y_vals)\n# plt.plot(x_vals, y_vals, linewidth=2, linestyle='-', color=colors[i], label=identifiers[i], marker=markers[i], markersize=10)\n\ny_range = (max_y_val-min_y_val)\nplt.ylabel(y_label, fontsize=16)\nplt.xlabel(x_label, fontsize=16)\nplt.grid()\nplt.legend(frameon=True)\nplt.ylim((min_y_val-0.1*y_range, 
max_y_val+0.4*y_range ))\nplt.xlim((0,1000))\nplt.savefig(exp_folders[save_index]+'Visualization/is_comparison.png', bbox_inches='tight', format='png', dpi=my_dpi, transparent=False)\nprint('Saving to path: ', exp_folders[save_index]+'Visualization/is_comparison.png')\n\nplt.close('all')\n\n\n\n", "sub_path": "ploter_3.py", "file_name": "ploter_3.py", "file_ext": "py", "file_size_in_byte": 8465, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.use", "line_number": 3, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 19, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 20, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 21, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 23, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 24, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 25, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 26, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 27, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "re.split", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 140, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}]} +{"seq_id": "404339326", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport datetime\nimport re\nimport sys\nimport logging\n\nfrom pptx import Presentation\nfrom pptx.util import Inches, Pt\nfrom pptx.dml.color import RGBColor\nfrom pptx.enum.text import PP_ALIGN\nfrom pptx.enum.text import MSO_ANCHOR, MSO_AUTO_SIZE\n\nfrom toml_parser import TOMLParser\n\nparser = TOMLParser()\nargs = sys.argv\nif len(args) == 1:\n raise Exception(\"Specify configuration file!\")\nelif len(args) >= 3:\n raise Exception(\"Too many configuration files is specified!\")\nparser.parse(sys.argv[1])\nconf = 
parser.dict_root\nproject = conf['global']['project']\nloglevel = conf['global']['loglevel']\n\nif loglevel == 'DEBUG':\n level_ = logging.DEBUG\nelif loglevel == 'INFO':\n level_ = logging.INFO\nelif loglevel == 'WARNING':\n level_ = logging.WARNING\nelif loglevel == 'ERROR':\n level_ = logging.ERROR\nelif loglevel == 'CRITCAL':\n level_ = logging.CRITCAL\n\nlogging.basicConfig(level = level_)\nlogger = logging.getLogger(__name__)\nlogger.info(project)\nproject_conf = conf[project]\nlogger.debug(project_conf)\n\nclass PPTXHandler(object):\n def __init__(self):\n\n self.prs = Presentation()\n\n self.dirpath_in = project_conf['dirpath_in']\n self.dirpath_out = project_conf['dirpath_out']\n self.delta_t = project_conf['delta_t']\n\n # Parameters\n self.ncols = project_conf['ncols']\n self.sizex = project_conf['sizex']\n self.sizey = project_conf['sizey']\n self.col_int = project_conf['col_int']\n self.row_int = project_conf['row_int'] \n self.col_sta = project_conf['col_sta'] \n self.row_sta = project_conf['row_sta'] \n\n self.left = project_conf['left']\n self.top = project_conf['top']\n self.width = project_conf['width']\n self.height = project_conf['height']\n\n self.title = project_conf['title']\n # self.str_suffix = project_conf['str_suffix']\n\n self.lst_slide = project_conf['slide']\n\n self.lst_fig_category = project_conf['lst_fig_category']\n\n def create_pptx(self):\n self.const_description_slides()\n self.const_data_slides()\n self.output_pptx()\n\n def output_pptx(self):\n dt_now = datetime.datetime.now()\n self.prs.save(u'{4}/{0:04d}{1:02d}{2:02d}_{3}.pptx'.format(\n dt_now.year, dt_now.month, dt_now.day, self.title, self.dirpath_out))\n\n def const_description_slides(self):\n for conf_slide in self.lst_slide:\n self.const_slide(conf_slide)\n\n def get_lst_dt(self, dt1, dt2, delta_t):\n period = dt2 - dt1\n days = period.days\n hours = period.seconds / 3600\n ngrids_time = (days * 24 + hours) / delta_t + 1\n td = datetime.timedelta(hours=delta_t)\n return [dt1 + td * i for i in range(ngrids_time)]\n\n def const_data_slides(self):\n for fig_category in self.lst_fig_category:\n self.fig_category = fig_category\n self.category_conf = project_conf['exec_cond_{1}'.format(project, fig_category)]\n lst_period = []\n for period_sta, period_end in zip(self.category_conf['lst_period_sta'], self.category_conf['lst_period_end']):\n lst_period.append([datetime.datetime.strptime(period_sta, '%Y%m%d%H'), datetime.datetime.strptime(period_end, '%Y%m%d%H')])\n for i, period in enumerate(lst_period):\n self.target = self.category_conf['targets'][i]\n self.care_for_each_period(period)\n for dt in self.get_lst_dt(period[0], period[1], self.delta_t):\n print(dt)\n self.loop(dt)\n\n def do_inner_proc(self, dt, slide):\n lst_fig_title = self.category_conf['lst_fig_title']\n for i, fig_type in enumerate(self.category_conf['lst_fig_type']):\n self.fig_dir = self.category_conf['lst_fig_dir'][i]\n self.prefix = self.category_conf['lst_prefix'][i]\n self.str_suffix = self.category_conf['lst_suffix'][i]\n icol = i % self.ncols\n irow = i / self.ncols\n\n if self.str_suffix == '{0:04d}{1:02d}{2:02d}{3:02d}.png':\n self.suffix = self.str_suffix.format(dt.year, dt.month, dt.day, dt.hour)\n elif self.str_suffix == '{3:02d}Z{2:02d}{1}{0:04d}.png':\n self.suffix = self.str_suffix.format(dt.year, dt.strftime(\"%b\").upper(), dt.day, dt.hour)\n elif self.str_suffix == '{0:02d}{1:02d}{2:02d}.png':\n dt_tmp = dt + datetime.timedelta(hours=11)\n self.suffix = self.str_suffix.format(dt_tmp.month, dt_tmp.day, 
dt_tmp.hour)\n\n if self.special_care(fig_type, icol, irow, dt, slide):\n continue\n\n if fig_type == \"-\":\n continue\n\n left = Inches(self.col_sta + self.col_int * icol)\n top = Inches(self.row_sta + self.row_int * irow)\n width = Inches(self.sizex)\n height = Inches(self.sizey)\n img_path = self.get_filepath(self.fig_dir, fig_type, self.suffix)\n slide.shapes.add_picture(img_path, left, top, width, height)\n\n top_figtitle = top - Inches(0.080)\n height_figtitle = Inches(0.4)\n txBox_figtitle = slide.shapes.add_textbox(left, top_figtitle, width, height_figtitle)\n txBox_figtitle.fill.solid()\n txBox_figtitle.fill.fore_color.rgb = RGBColor(0xFF, 0xFF, 0xFF)\n tf_figtitle = txBox_figtitle.text_frame\n tf_figtitle.auto_size = MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE\n tf_figtitle.text = lst_fig_title[i]\n p = tf_figtitle.paragraphs[0]\n p.alignment = PP_ALIGN.CENTER\n p.font.size = Pt(14)\n\n left_ = Inches(9.45-len(self.section_title)*0.09225)\n top_ = Inches(0.1)\n width_ = Inches(0.45+len(self.section_title)*0.0925)\n height_ = Inches(0.4)\n txBox = slide.shapes.add_textbox(left_, top_, width_, height_)\n txBox.line.color.rgb = RGBColor(0x00, 0x00, 0x00)\n tf = txBox.text_frame\n tf.text = self.section_title\n\n def const_slide(self, conf_slide):\n bullet_slide_layout = self.prs.slide_layouts[1]\n slide = self.prs.slides.add_slide(bullet_slide_layout)\n shapes = slide.shapes\n\n shapes.title.text = conf_slide['title']\n\n body_shape = shapes.placeholders[1]\n tf = body_shape.text_frame\n tf.text = conf_slide['text1']\n self.bullet(tf, conf_slide, 'text2', 'level2')\n self.bullet(tf, conf_slide, 'text3', 'level3')\n self.bullet(tf, conf_slide, 'text4', 'level4')\n self.bullet(tf, conf_slide, 'text5', 'level5')\n self.bullet(tf, conf_slide, 'text6', 'level6')\n self.bullet(tf, conf_slide, 'text7', 'level7')\n\n def bullet(self, tf, conf_slide, key_text, key_level):\n if key_text in conf_slide:\n p = tf.add_paragraph()\n p.text = conf_slide[key_text]\n p.level = conf_slide[key_level]\n\n def get_filepath(self, fig_dir, fig_type, suffix):\n if fig_type != '-':\n img_path = '{0}/{1}/{2}/{3}/{4}_{5}'.format(self.dirpath_in, self.fig_dir, fig_type, self.target, self.prefix, suffix)\n return img_path\n\n def loop(self, dt):\n title_only_slide_layout = self.prs.slide_layouts[5]\n\n slide = self.prs.slides.add_slide(title_only_slide_layout)\n shapes = slide.shapes\n shapes.title.text = dt.strftime('%Y/%m/%d %HUTC')\n self.do_inner_proc(dt, slide)\n\n def care_for_each_period(self, period):\n title_slide_layout = self.prs.slide_layouts[0]\n slide = self.prs.slides.add_slide(title_slide_layout)\n shapes = slide.shapes\n self.section_title = '{0} {1}'.format(self.fig_category, self.target)\n shapes.title.text = self.section_title\n days = (period[1] - period[0]).days\n hours = (period[1] - period[0]).seconds // 3600\n if days == 0:\n str_days = \"{}hours\".format(hours)\n else:\n str_days = \"{}days\".format(days)\n slide.placeholders[1].text = '{0} - {1} ({2})'.format(period[0].strftime(\n \"%Y/%m/%d %HUTC\"), period[1].strftime(\"%Y/%m/%d %HUTC\"), str_days)\n\n def special_care(self, fig_type, icol, irow, dt, slide):\n if fig_type[-1] == '_':\n left = Inches(self.left)\n top = Inches(self.top)\n width = Inches(self.width)\n height = Inches(self.height)\n img_path = '{0}/{1}/{2}/{3}/{4}_{5}'.format(self.dirpath_in, self.fig_dir, fig_type[:-1], self.target, self.prefix, self.suffix)\n # print(img_path)\n try:\n slide.shapes.add_picture(img_path, left, top, width, height)\n except IOError as e:\n pass\n 
return True\n else:\n return False\n return False\n\n def const_second_slide(self):\n pass\n\n\nif __name__ == '__main__':\n obj = PPTXHandler()\n obj.create_pptx()\n", "sub_path": "pptx_handler.py", "file_name": "pptx_handler.py", "file_ext": "py", "file_size_in_byte": 9009, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "toml_parser.TOMLParser", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 29, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 31, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 35, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 37, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 40, "usage_type": "call"}, {"api_name": "pptx.Presentation", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 103, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 125, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 134, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 135, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 136, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 137, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 141, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 142, "usage_type": "call"}, {"api_name": "pptx.dml.color.RGBColor", "line_number": 145, "usage_type": "call"}, {"api_name": "pptx.enum.text.MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE", "line_number": 147, "usage_type": "attribute"}, {"api_name": "pptx.enum.text.MSO_AUTO_SIZE", "line_number": 147, "usage_type": "name"}, {"api_name": "pptx.enum.text.PP_ALIGN.CENTER", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pptx.enum.text.PP_ALIGN", "line_number": 150, "usage_type": "name"}, {"api_name": "pptx.util.Pt", "line_number": 151, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 153, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 154, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 155, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 156, "usage_type": "call"}, {"api_name": "pptx.dml.color.RGBColor", "line_number": 158, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 215, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 216, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 217, "usage_type": "call"}, {"api_name": "pptx.util.Inches", "line_number": 218, "usage_type": "call"}]} +{"seq_id": "265660737", "text": "# coding: utf-8\n\"\"\"\nCopyright (c) 2019 The MITRE Corporation.\n\"\"\"\n\n\nimport io\nimport json\nimport gluonnlp as 
nlp\nimport glob\nfrom gluonnlp.data import Counter\nfrom multiprocessing import Pool, cpu_count\nfrom mantichora import mantichora\nfrom atpbar import atpbar\nimport threading\nimport logging\n\nfrom tmnt.preprocess import BasicTokenizer\n\n__all__ = ['JsonVectorizer', 'TextVectorizer']\n\n\nclass Vectorizer(object):\n\n def __init__(self, custom_stop_word_file=None, encoding='utf-8'):\n self.encoding = encoding\n self.tokenizer = BasicTokenizer(use_stop_words=True)\n\n def get_counter_dir_parallel(self, data_dir, pat):\n raise NotImplementedError('Vectorizer must be instantiated as TextVectorizer or JsonVectorizer')\n\n def vectorize_fn(self, file_and_vocab):\n raise NotImplementedError('Vectorizer fn must be specified by concrete subclass')\n \n def get_vocab(self, counter, size):\n vocab = nlp.Vocab(counter, unknown_token=None, padding_token=None,\n bos_token=None, eos_token=None, min_freq=5, max_size=size)\n return vocab\n\n\n def chunks(self, l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n def task_vec_fn(self, name, files):\n sp_vecs = []\n for i in atpbar(range(len(files)), name=name):\n sp_vecs.append(self.vectorize_fn(files[i]))\n return sp_vecs\n\n\n def get_sparse_vecs(self, sp_out_file, vocab_out_file, data_dir, vocab_size=2000, i_vocab=None, full_histogram_file=None, pat='*.json'):\n files = glob.glob(data_dir + '/' + pat)\n if i_vocab is None:\n counter = self.get_counter_dir_parallel(data_dir, pat)\n vocab = self.get_vocab(counter, vocab_size)\n else:\n vocab = i_vocab\n files_and_vocab = [(f,vocab) for f in files]\n if len(files_and_vocab) > 2:\n file_batches = list(self.chunks(files_and_vocab, max(1, len(files_and_vocab) // cpu_count())))\n with mantichora() as mcore:\n for i in range(len(file_batches)):\n mcore.run(self.task_vec_fn,\"Vectorizing Batch {}\".format(i), file_batches[i])\n sp_vecs = mcore.returns()\n sp_vecs = [ item for sl in sp_vecs for item in sl ]\n else:\n sp_vecs = map(self.vectorize_fn, files_and_vocab)\n sp_list = list(sp_vecs)\n with io.open(sp_out_file, 'w', encoding=self.encoding) as fp:\n for block in sp_list:\n for (v,l) in block:\n fp.write(str(l)) \n for (i,c) in v:\n fp.write(' ')\n fp.write(str(i))\n fp.write(':')\n fp.write(str(c))\n fp.write('\\n')\n if i_vocab is None: ## print out vocab if we had to create it\n with io.open(vocab_out_file, 'w', encoding=self.encoding) as fp:\n for i in range(len(vocab.idx_to_token)):\n fp.write(vocab.idx_to_token[i])\n fp.write('\\n')\n if full_histogram_file:\n with io.open(full_histogram_file, 'w', encoding=self.encoding) as fp:\n items = list(counter.items())\n items.sort(key=lambda x: -x[1])\n for k,v in items:\n fp.write(str(k))\n fp.write(' ')\n fp.write(str(v))\n fp.write('\\n')\n return vocab\n\n\nclass JsonVectorizer(Vectorizer):\n\n def __init__(self, custom_stop_word_file=None, text_key='body', label_key=None, min_doc_size=6, label_prefix=-1,\n encoding='utf-8'):\n super(JsonVectorizer, self).__init__(custom_stop_word_file, encoding=encoding)\n self.encoding = encoding\n self.text_key = text_key\n self.label_key = label_key\n self.label_prefix = label_prefix\n self.min_doc_size = min_doc_size\n\n def get_counter_file(self, json_file):\n counter = None\n with io.open(json_file, 'r', encoding=self.encoding) as fp:\n for l in fp:\n js = json.loads(l)\n txt = js[self.text_key] ## text field\n counter = nlp.data.count_tokens(self.tokenizer.tokenize(txt), counter = counter)\n return counter\n\n def task(self, name, files):\n 
counters = []\n for i in atpbar(range(len(files)), name=name):\n counters.append(self.get_counter_file(files[i]))\n return counters\n\n def get_counter_dir_parallel(self, data_dir, pat):\n files = glob.glob(data_dir + '/' + pat)\n if len(files) > 2:\n file_batches = list(self.chunks(files, max(1, len(files) // cpu_count())))\n logging.info(\"Counting vocabulary over {} text files with {} batches\".format(len(files), len(file_batches)))\n with mantichora() as mcore:\n for i in range(len(file_batches)):\n mcore.run(self.task,\"Counting Vocab Items - Batch {}\".format(i), file_batches[i])\n counter_cs = mcore.returns()\n counters = [ item for sl in counter_cs for item in sl ]\n else:\n counters = map(self.get_counter_file, files)\n return sum(counters, Counter())\n\n def vectorize_fn(self, file_and_vocab):\n json_file, vocab = file_and_vocab\n sp_vecs = []\n with io.open(json_file, 'r', encoding=self.encoding) as fp:\n for l in fp:\n js = json.loads(l)\n toks = self.tokenizer.tokenize(js[self.text_key])\n try:\n lstr = js[self.label_key]\n if self.label_prefix > 0:\n label_str = lstr[:self.label_prefix]\n else:\n label_str = lstr\n except KeyError:\n label_str = \"\"\n tok_ids = [vocab[token] for token in toks if token in vocab]\n if (len(tok_ids) >= self.min_doc_size):\n cnts = nlp.data.count_tokens(tok_ids)\n sp_vecs.append((sorted(cnts.items()), label_str))\n return sp_vecs\n\n\nclass TextVectorizer(Vectorizer):\n\n def __init__(self, custom_stop_word_file=None, min_doc_size=6, encoding='utf-8'):\n super(TextVectorizer, self).__init__(custom_stop_word_file, encoding=encoding)\n self.min_doc_size = min_doc_size\n\n \n def task(self, name, files):\n counters = []\n for i in atpbar(range(len(files)), name=name):\n counters.append(self.get_counter_file_batch(files[i]))\n return counters\n\n\n def get_counter_dir_parallel(self, txt_dir, pat='*.txt'):\n def batches(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]\n files = glob.glob(txt_dir + '/' + pat)\n batch_size = max(1, int(len(files) / 20))\n file_batches = list(batches(files, batch_size))\n if len(file_batches) > 2:\n file_batch_batches = list(self.chunks(file_batches, max(1, len(files) // cpu_count())))\n with mantichora() as mcore:\n for i in range(len(file_batch_batches)):\n mcore.run(self.task,\"Counting Vocab Items - Batch {}\".format(i), file_batch_batches[i])\n counter_cs = mcore.returns()\n counters = [ item for sl in counter_cs for item in sl ]\n else:\n counters = map(self.get_counter_file_batch, file_batches) \n return sum(counters, Counter())\n\n\n def get_counter_file_batch(self, txt_file_batch):\n counter = Counter()\n for txt_file in txt_file_batch:\n with io.open(txt_file, 'r', encoding=self.encoding) as fp:\n for txt in fp:\n counter = nlp.data.count_tokens(self.tokenizer.tokenize(txt), counter = counter)\n return counter\n\n\n def vectorize_fn(self, txt_file_and_vocab):\n txt_file, vocab = txt_file_and_vocab\n sp_vecs = []\n with io.open(txt_file, 'r', encoding=self.encoding) as fp:\n doc_tok_ids = []\n for txt in fp:\n toks = self.tokenizer.tokenize(txt)\n tok_ids = [vocab[token] for token in toks if token in vocab]\n doc_tok_ids.extend(tok_ids)\n if (len(doc_tok_ids) >= self.min_doc_size):\n cnts = nlp.data.count_tokens(doc_tok_ids)\n sp_vecs.append((sorted(cnts.items()), \"\"))\n return sp_vecs\n\n \n", "sub_path": "tmnt/preprocess/vectorizer.py", "file_name": "vectorizer.py", "file_ext": "py", "file_size_in_byte": 8547, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "55", "api": [{"api_name": "tmnt.preprocess.BasicTokenizer", "line_number": 27, "usage_type": "call"}, {"api_name": "gluonnlp.Vocab", "line_number": 36, "usage_type": "call"}, {"api_name": "atpbar.atpbar", "line_number": 48, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 54, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 62, "usage_type": "call"}, {"api_name": "mantichora.mantichora", "line_number": 63, "usage_type": "call"}, {"api_name": "io.open", "line_number": 71, "usage_type": "call"}, {"api_name": "io.open", "line_number": 82, "usage_type": "call"}, {"api_name": "io.open", "line_number": 87, "usage_type": "call"}, {"api_name": "io.open", "line_number": 111, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 113, "usage_type": "call"}, {"api_name": "gluonnlp.data.count_tokens", "line_number": 115, "usage_type": "call"}, {"api_name": "gluonnlp.data", "line_number": 115, "usage_type": "attribute"}, {"api_name": "atpbar.atpbar", "line_number": 120, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 125, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 127, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 128, "usage_type": "call"}, {"api_name": "mantichora.mantichora", "line_number": 129, "usage_type": "call"}, {"api_name": "gluonnlp.data.Counter", "line_number": 136, "usage_type": "call"}, {"api_name": "io.open", "line_number": 141, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 143, "usage_type": "call"}, {"api_name": "gluonnlp.data.count_tokens", "line_number": 155, "usage_type": "call"}, {"api_name": "gluonnlp.data", "line_number": 155, "usage_type": "attribute"}, {"api_name": "atpbar.atpbar", "line_number": 169, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 178, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 182, "usage_type": "call"}, {"api_name": "mantichora.mantichora", "line_number": 183, "usage_type": "call"}, {"api_name": "gluonnlp.data.Counter", "line_number": 190, "usage_type": "call"}, {"api_name": "gluonnlp.data.Counter", "line_number": 194, "usage_type": "call"}, {"api_name": "io.open", "line_number": 196, "usage_type": "call"}, {"api_name": "gluonnlp.data.count_tokens", "line_number": 198, "usage_type": "call"}, {"api_name": "gluonnlp.data", "line_number": 198, "usage_type": "attribute"}, {"api_name": "io.open", "line_number": 205, "usage_type": "call"}, {"api_name": "gluonnlp.data.count_tokens", "line_number": 212, "usage_type": "call"}, {"api_name": "gluonnlp.data", "line_number": 212, "usage_type": "attribute"}]} +{"seq_id": "36172070", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport pytest\nimport os\nfrom humilis.layer import Layer\nfrom humilis.environment import Environment\nfrom humilis.cloudformation import CloudFormation\nfrom humilis.ec2 import EC2\nimport humilis.config as config\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef cf():\n yield CloudFormation()\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef ec2():\n yield EC2()\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef humilis_testkey(ec2):\n # Create a keypair used for testing purposes\n created = ec2.create_key_pair(config.test_key)\n yield config.test_key\n if created:\n ec2.delete_key_pair(config.test_key)\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef humilis_example_environment():\n yield os.path.join('examples', 
'example-environment.yml')\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef humilis_environment(humilis_example_environment):\n yield Environment(humilis_example_environment)\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef humilis_vpc_layer(cf, humilis_environment):\n layer = Layer(humilis_environment, 'vpc')\n yield layer\n cf.delete_stack(layer.name)\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef humilis_instance_layer(cf, humilis_environment, humilis_testkey):\n layer = Layer(humilis_environment, 'instance', keyname=humilis_testkey)\n yield layer\n cf.delete_stack(layer.name)\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef humilis_named_instance_layer(cf, humilis_environment, humilis_testkey):\n layer = Layer(humilis_environment, 'namedinstance',\n keyname=humilis_testkey)\n yield layer\n cf.delete_stack(layer.name)\n\n\ndef test_create_layer_object(humilis_environment, humilis_vpc_layer):\n layer = humilis_vpc_layer\n assert layer.relname == 'vpc'\n assert layer.name == \"{}-vpc\".format(humilis_environment.name)\n assert len(layer.yaml_params) == 2\n assert layer.yaml_params['vpc_cidr']['value'] == '10.0.0.0/16'\n assert layer.tags.get('humilis-layer') == layer.name\n assert layer.tags.get('humilis-environment') == humilis_environment.name\n\n\ndef test_layer_not_already_in_aws(humilis_vpc_layer):\n layer = humilis_vpc_layer\n assert not layer.already_in_cf\n\n\ndef test_get_section_files(humilis_vpc_layer):\n assert len(humilis_vpc_layer.get_section_files('resources')) == 2\n assert len(humilis_vpc_layer.get_section_files('meta')) == 1\n assert len(humilis_vpc_layer.get_section_files('invalid')) == 0\n\n\ndef test_load_section(humilis_vpc_layer):\n files = humilis_vpc_layer.get_section_files('resources')\n data = humilis_vpc_layer.load_section('resources', files)\n assert all(res in data for res in ['AttachGateway', 'Subnet'])\n\n\ndef test_compile_template(humilis_vpc_layer):\n cf_template = humilis_vpc_layer.compile()\n assert 'VPC' in cf_template['Resources'] and \\\n 'InternetGateway' in cf_template['Resources'] and \\\n 'Description' in cf_template and \\\n len(cf_template['Description']) > 0\n\n\ndef test_create_and_delete_stack(cf, humilis_vpc_layer):\n \"\"\"Creates a sample stack in CF\"\"\"\n # Make sure the stack wasn't there already\n assert not cf.stack_exists(humilis_vpc_layer.name)\n\n # Create the stack, and make sure it has been pushed to CF\n cf_template = humilis_vpc_layer.create()\n assert isinstance(cf_template, dict)\n assert cf.stack_ok(humilis_vpc_layer.name)\n\n # Delete the stack\n humilis_vpc_layer.delete()\n assert not cf.stack_exists(humilis_vpc_layer.name)\n\n\ndef test_create_stack_lacking_dependencies(cf, humilis_instance_layer):\n \"\"\"Attempts to create a stack lacking dependencies: exception\"\"\"\n assert not cf.stack_exists(humilis_instance_layer.name)\n # Should simply skip the layer since dependencies are not met\n humilis_instance_layer.create()\n assert not cf.stack_exists(humilis_instance_layer.name)\n\n\ndef test_create_dependant_stack(cf, humilis_vpc_layer, humilis_instance_layer):\n \"\"\"Creates two stacks, the second depending on the first\"\"\"\n assert not cf.stack_exists(humilis_vpc_layer.name)\n humilis_vpc_layer.create()\n assert cf.stack_ok(humilis_vpc_layer.name)\n humilis_instance_layer.create()\n assert cf.stack_ok(humilis_instance_layer.name)\n humilis_instance_layer.delete()\n assert not cf.stack_exists(humilis_instance_layer.name)\n humilis_vpc_layer.delete()\n assert not 
cf.stack_exists(humilis_vpc_layer.name)\n\n\ndef test_create_namedinstance_stack(cf, humilis_vpc_layer,\n humilis_named_instance_layer):\n \"\"\"Creates an instance whose AMI uses a reference to the AMI tags\"\"\"\n assert not cf.stack_exists(humilis_vpc_layer.name)\n humilis_vpc_layer.create()\n assert cf.stack_ok(humilis_vpc_layer.name)\n humilis_named_instance_layer.create()\n assert cf.stack_ok(humilis_named_instance_layer.name)\n humilis_named_instance_layer.delete()\n assert not cf.stack_exists(humilis_named_instance_layer.name)\n humilis_vpc_layer.delete()\n assert not cf.stack_exists(humilis_vpc_layer.name)\n", "sub_path": "tests/test_layer.py", "file_name": "test_layer.py", "file_ext": "py", "file_size_in_byte": 5016, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "humilis.cloudformation.CloudFormation", "line_number": 16, "usage_type": "call"}, {"api_name": "pytest.yield_fixture", "line_number": 14, "usage_type": "call"}, {"api_name": "humilis.ec2.EC2", "line_number": 21, "usage_type": "call"}, {"api_name": "pytest.yield_fixture", "line_number": 19, "usage_type": "call"}, {"api_name": "humilis.config.test_key", "line_number": 27, "usage_type": "attribute"}, {"api_name": "humilis.config", "line_number": 27, "usage_type": "name"}, {"api_name": "humilis.config.test_key", "line_number": 28, "usage_type": "attribute"}, {"api_name": "humilis.config", "line_number": 28, "usage_type": "name"}, {"api_name": "humilis.config.test_key", "line_number": 30, "usage_type": "attribute"}, {"api_name": "humilis.config", "line_number": 30, "usage_type": "name"}, {"api_name": "pytest.yield_fixture", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pytest.yield_fixture", "line_number": 33, "usage_type": "call"}, {"api_name": "humilis.environment.Environment", "line_number": 40, "usage_type": "call"}, {"api_name": "pytest.yield_fixture", "line_number": 38, "usage_type": "call"}, {"api_name": "humilis.layer.Layer", "line_number": 45, "usage_type": "call"}, {"api_name": "pytest.yield_fixture", "line_number": 43, "usage_type": "call"}, {"api_name": "humilis.layer.Layer", "line_number": 52, "usage_type": "call"}, {"api_name": "pytest.yield_fixture", "line_number": 50, "usage_type": "call"}, {"api_name": "humilis.layer.Layer", "line_number": 59, "usage_type": "call"}, {"api_name": "pytest.yield_fixture", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "315667061", "text": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport pytest\nfrom mrsimulator import Site\nfrom mrsimulator import SpinSystem\nfrom mrsimulator.method.utils import query_permutations\n\n__author__ = \"Maxwell Venetos\"\n__email__ = \"mvenetos@berkeley.edu\"\n\nH1 = Site(isotope=\"1H\", shielding_symmetric=dict(zeta=50, eta=0))\nSi29 = Site(isotope=\"29Si\", shielding_symmetric=dict(zeta=50, eta=0))\nO17 = Site(isotope=\"17O\", quadrupolar=dict(Cq=50, eta=0))\n\n\ndef check_equal(res, other):\n assert res.shape == np.asarray(other).shape\n\n for item in other:\n assert item in res\n\n\ndef basic_transition_query_tests(iso):\n # Single site tests\n test_1 = query_permutations(\n query={\"P\": {\"channel-1\": [[-1]]}},\n isotope=iso[0].get_isotopes(symbol=True),\n channel=[\"1H\"],\n )\n check_equal(test_1, [[-1.0]])\n\n test_2 = query_permutations(\n query={\"P\": {\"channel-1\": [[-1], [1]]}},\n 
isotope=iso[0].get_isotopes(symbol=True),\n channel=[\"1H\"],\n )\n check_equal(test_2, [[-1.0], [1.0]])\n\n # Multi sites same channel tests\n test_3 = query_permutations(\n query={\"P\": {\"channel-1\": [[-1, 1]]}},\n isotope=iso[1].get_isotopes(symbol=True),\n channel=[\"1H\"],\n )\n test_3_check = [\n [-1.0, 0.0, 1.0],\n [0.0, -1.0, 1.0],\n [1.0, 0.0, -1.0],\n [-1.0, 1.0, 0.0],\n [1.0, -1.0, 0.0],\n [0.0, 1.0, -1.0],\n ]\n check_equal(test_3, test_3_check)\n\n # Multi sites same channel tests\n test_4 = query_permutations(\n query={\"P\": {\"channel-1\": [[-1, -1]]}},\n isotope=iso[1].get_isotopes(symbol=True),\n channel=[\"1H\"],\n )\n test_4_check = [\n [0.0, -1.0, -1.0],\n [-1.0, -1.0, 0.0],\n [-1.0, 0.0, -1.0],\n ]\n check_equal(test_4, test_4_check)\n\n # Multi sites same channel tests\n test_5 = query_permutations(\n query={\"P\": {\"channel-1\": [[-1]]}},\n isotope=iso[2].get_isotopes(symbol=True),\n channel=[\"17O\"],\n )\n check_equal(test_5, [[0.0, -1.0, 0.0]])\n\n test_6 = query_permutations(\n query={\"P\": {\"channel-1\": [[-1]], \"channel-2\": [[2]]}},\n isotope=iso[2].get_isotopes(symbol=True),\n channel=[\"29Si\", \"17O\"],\n )\n test_6_check = [[-1.0, 2.0, 0.0], [0.0, 2.0, -1.0]]\n check_equal(test_6, test_6_check)\n\n # test by swapping the channels and channel query\n test_7 = query_permutations(\n query={\"P\": {\"channel-1\": [[-1, -1], [1]], \"channel-2\": [[2]]}},\n isotope=iso[2].get_isotopes(symbol=True),\n channel=[\"29Si\", \"17O\"],\n )\n test_7_check = [[-1.0, 2.0, -1.0], [1.0, 2.0, 0.0], [0.0, 2.0, 1.0]]\n check_equal(test_7, test_7_check)\n\n test_7 = query_permutations(\n query={\"P\": {\"channel-1\": [[2]], \"channel-2\": [[-1, -1], [1]]}},\n isotope=iso[2].get_isotopes(symbol=True),\n channel=[\"17O\", \"29Si\"],\n )\n check_equal(test_7, test_7_check)\n\n test_7 = query_permutations(\n query={\"P\": {\"channel-2\": [[-1, -1], [1]], \"channel-1\": [[2]]}},\n isotope=iso[2].get_isotopes(symbol=True),\n channel=[\"17O\", \"29Si\"],\n )\n check_equal(test_7, test_7_check)\n\n test_7 = query_permutations(\n query={\"P\": {\"channel-2\": [[2]], \"channel-1\": [[-1, -1], [1]]}},\n isotope=iso[2].get_isotopes(symbol=True),\n channel=[\"29Si\", \"17O\"],\n )\n check_equal(test_7, test_7_check)\n\n test_8 = query_permutations(\n query={\"P\": {\"channel-1\": [[-1, -1], [1]]}},\n isotope=iso[2].get_isotopes(symbol=True),\n channel=[\"29Si\", \"27Al\"],\n )\n test_8_check = []\n check_equal(test_8, test_8_check)\n\n test_9 = query_permutations(\n query={\"P\": {\"channel-1\": [[-1, -1], [1]], \"channel-2\": [[2]]}},\n isotope=iso[2].get_isotopes(symbol=True),\n channel=[\"29Si\", \"27Al\"],\n )\n test_9_check = []\n check_equal(test_9, test_9_check)\n\n\ndef test_transition_query():\n iso = [\n SpinSystem(sites=[H1]),\n SpinSystem(sites=[H1, H1, H1]),\n SpinSystem(sites=[Si29, O17, Si29]),\n ]\n basic_transition_query_tests(iso)\n\n\ndef test_two_site():\n # sys = SpinSystem(sites=[H1])\n with pytest.raises(ValueError, match=\".*The length of the transition query*\"):\n query_permutations(\n query={\"P\": {\"channel-1\": [[-1, 1]]}},\n isotope=[\"1H\"],\n channel=[\"1H\"],\n )\n", "sub_path": "src/mrsimulator/method/tests/test_query_permutation.py", "file_name": "test_query_permutation.py", "file_ext": "py", "file_size_in_byte": 4362, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "mrsimulator.Site", "line_number": 11, "usage_type": "call"}, {"api_name": "mrsimulator.Site", "line_number": 
12, "usage_type": "call"}, {"api_name": "mrsimulator.Site", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 17, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 25, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 32, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 40, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 56, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 69, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 76, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 85, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 93, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 100, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 107, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 114, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 122, "usage_type": "call"}, {"api_name": "mrsimulator.SpinSystem", "line_number": 133, "usage_type": "call"}, {"api_name": "mrsimulator.SpinSystem", "line_number": 134, "usage_type": "call"}, {"api_name": "mrsimulator.SpinSystem", "line_number": 135, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 142, "usage_type": "call"}, {"api_name": "mrsimulator.method.utils.query_permutations", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "516223823", "text": "# -*- coding: utf-8 -*-\r\n\r\nimport datetime\r\nimport logging\r\nimport pdb\r\n\r\nfrom openerp import api, fields, models\r\nfrom openerp.exceptions import Warning\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nclass RegistroDeViaticos(models.Model):\r\n _name = 'viatico.registro'\r\n\r\n def _get_solic_id(self):\r\n return self.env.uid\r\n\r\n def _datetime_now(self):\r\n return datetime.datetime.now()\r\n\r\n def _get_department(self, solicitante_id):\r\n depart = self.env['hr.employee'].search(\r\n [('user_id', '=', solicitante_id)]).department_id.id\r\n\r\n return depart\r\n\r\n\r\n secuencia = fields.Char(string='Secuencia', readonly=True)\r\n\r\n solicitante = fields.Many2one('res.users', string='Solicitante',\r\n default=_get_solic_id, readonly=True)\r\n departamento = fields.Many2one('hr.department', readonly=True)\r\n\r\n fecha_solicitud = fields.Datetime(readonly=True, default=_datetime_now)\r\n fecha_viaje_desde = fields.Datetime(string='Desde')\r\n fecha_viaje_hasta = fields.Datetime(string='Hasta')\r\n\r\n tipo = fields.Selection((('viatico', 'Viatico'), ('dieta', 'Dieta')),\r\n default='viatico', required=True)\r\n zonas_id = fields.Many2one('viatico.zonas', string='Zona', required=True)\r\n escala_id = fields.Many2one('viatico.escala', string='Escala',\r\n required=True)\r\n\r\n vehiculo = fields.Many2one('fleet.vehicle', string='Vehiculo', )\r\n vehiculo_en_zona = fields.Boolean(default=False)\r\n\r\n registro_lines = fields.One2many('viatico.registro.line', 'registro_id',\r\n required=True)\r\n\r\n total = fields.Float(compute='_onchange_lineas')\r\n\r\n state = fields.Selection((('solic', 'Solicitado'),\r\n ('confirm', 
'Confirmado'),\r\n ('transp', 'Transporte Asignado'),\r\n ('val', 'Validado'),\r\n ('completado', 'Completado'),\r\n ('cancel', 'Cancelado')),\r\n default='solic', string='Estado')\r\n\r\n stage = fields.Selection((('solic', 'Esperando Confirmacion'),\r\n (\r\n 'confirm', 'Esperando Asignacion de Transporte'),\r\n ('transp', 'Esperando Validacion'),\r\n ('val', 'Completado')),\r\n default='solic', string='Etapa')\r\n\r\n notas = fields.Text(string='Notas')\r\n\r\n solicitado_por = fields.Many2one('res.users', string='Solicitado por',\r\n readonly=True)\r\n confirmado_por = fields.Many2one('res.users', string='Confirmado por',\r\n readonly=True)\r\n tranps_por = fields.Many2one('res.users', string='Asig. de Transporte por',\r\n readonly=True)\r\n validado_por = fields.Many2one('res.users', string='Valiado por',\r\n readonly=True)\r\n cancelado_por = fields.Many2one('res.users', string='Cancelado por',\r\n readonly=True)\r\n\r\n @api.onchange('registro_lines')\r\n def _onchange_lineas(self):\r\n total = 0\r\n for line in self.registro_lines:\r\n total += sum([line.desayuno, line.almuerzo, line.cena,\r\n line.alojamiento, line.dia_completo, line.gasto_imp,\r\n line.gasto_estra])\r\n\r\n self.total = total\r\n\r\n @api.onchange('escala_id')\r\n def _onchange_escala(self):\r\n zonas = self.env['viatico.zonas'].search([\r\n ('escala_id', '=', self.escala_id.id)])\r\n if zonas:\r\n value = [x.id for x in zonas]\r\n self.zonas_id = False\r\n return {'domain': {'zonas_id': [('id', 'in', value)]}}\r\n\r\n @api.onchange('solicitante')\r\n def _onchange_solicitante(self):\r\n depart = self.env['hr.employee'].search(\r\n [('user_id', '=', self.solicitante.id)]).department_id.id\r\n\r\n self.departamento = depart\r\n\r\n @api.onchange('vehiculo')\r\n def _onchange_vehiculo(self):\r\n esta = False\r\n for line in self.zonas_id.vehiculos_ids:\r\n if line.vehiculo_id.id == self.vehiculo.id:\r\n esta = True\r\n self.vehiculo_en_zona = esta\r\n\r\n @api.multi\r\n def confirm(self):\r\n self.write({'state': 'confirm', 'stage': 'confirm',\r\n 'confirmado_por': self.env.uid})\r\n\r\n @api.multi\r\n def validar(self):\r\n peaje = 0\r\n for line in self.zonas_id.vehiculos_ids:\r\n if line.vehiculo_id.id == self.vehiculo.id:\r\n peaje = line.peaje\r\n concepto = 'Documento Origen: %s\\n\\n' % self.secuencia\r\n concepto += \"Fecha desde: %s \\t hasta: %s \\t\\t\\t\\t Lugar: %s \\t\\t\\t\\t Tipo: %s\\n\\n\" % (\r\n self.fecha_viaje_desde, self.fecha_viaje_hasta, self.zonas_id.name,\r\n self.tipo.upper())\r\n\r\n for line in self.registro_lines:\r\n concepto += '%s\\n' % line.employee_id.name\r\n concepto += '\\tDesayuno: RD$ %d | Almuerzo: RD$ %d | Cena: RD$ %d' % (\r\n line.desayuno, line.almuerzo, line.cena\r\n )\r\n if self.tipo == 'viatico':\r\n concepto += ' | Alojamiento: RD$ %d |Gasto Extra.: RD$ %d | Gasto Impuestos.: RD$ %d' % (\r\n line.alojamiento, line.gasto_estra, line.gasto_imp)\r\n concepto += '\\n'\r\n\r\n concepto += '\\nPEAJE: RD$ %d\\n\\n' % peaje\r\n concepto += self.notas + '\\n' if self.notas else \"\"\r\n\r\n\r\n cjc_obj = self.env['cjc_request.cjc_request']\r\n dep_id = self.env['hr.employee'].search(\r\n [('user_id', '=', self.solicitante.id)]).department_id.id\r\n\r\n cjc_id = cjc_obj.create({\r\n 'usuario': self.solicitante.id,\r\n 'solicitado_por': self.solicitante.id,\r\n 'departamento': dep_id,\r\n 'monto_solicitado': self.total + peaje,\r\n 'concepto': concepto,\r\n 'tipo': 'via',\r\n # 'viatico_registro_id': self.id,\r\n })\r\n cjc_id.write({'state': 'validar1', 'stage': 'validar1', 
'validado1_por': self.confirmado_por.id})\r\n self.write(\r\n {'state': 'val', 'stage': 'val', 'validado_por': self.env.uid})\r\n return True\r\n\r\n @api.multi\r\n def cancel(self):\r\n self.write({'state': 'cancel', 'cancelado_por': self.env.uid})\r\n\r\n @api.multi\r\n def asignacion(self):\r\n self.write({'state': 'transp', 'stage': 'transp',\r\n 'tranps_por': self.env.uid})\r\n\r\n @api.model\r\n def create(self, values):\r\n values['departamento'] = self._get_department(self._get_solic_id())\r\n values['secuencia'] = self.env['ir.sequence'].get('viatico.registro')\r\n values['solicitado_por'] = self.env.uid\r\n\r\n return super(RegistroDeViaticos, self).create(values)\r\n\r\n\r\nclass RegistroDeViaticosLines(models.Model):\r\n _name = 'viatico.registro.line'\r\n\r\n def str_to_datetime(self, str_date):\r\n \"\"\"\r\n Converts dates given in str format to datetime format.\r\n\r\n The str_date parameter can be a single date or a list of dates.\r\n : param str_date: '2016/2/14' or ['2016/2/14', '2016/2/27']\r\n : return: datetime(2016, 2, 14) or [datetime(2016, 2, 14), ...]\r\n \"\"\"\r\n\r\n if isinstance(str_date, list):\r\n fechas = []\r\n for date in str_date:\r\n fecha = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\r\n fecha = datetime.date(fecha.year, fecha.month, fecha.day)\r\n fechas.append(fecha)\r\n\r\n return fechas\r\n\r\n elif isinstance(str_date, str):\r\n str_date = str_date[:10]\r\n fecha = datetime.datetime.strptime(str_date, '%Y-%m-%d')\r\n fecha = datetime.date(fecha.year, fecha.month, fecha.day)\r\n\r\n return fecha\r\n\r\n registro_id = fields.Many2one('viatico.registro')\r\n\r\n employee_id = fields.Many2one('hr.employee', string='Empleado',\r\n required=True)\r\n job_id = fields.Many2one(related='employee_id.job_id', string='Posicion',\r\n readonly=True)\r\n\r\n concepto_id = fields.Many2one('viatico.conceptos', string='Concepto',\r\n required=True)\r\n # tipo = fields.Selection(related='registro_id.tipo')\r\n # zonas_id = fields.Many2one(related='registro_id.zonas_id', string='Zona')\r\n # escala_id = fields.Many2one(related='registro_id.escala_id', string='Escala')\r\n\r\n pay_desayuno = fields.Boolean(default=False, string='pagar desayuno?')\r\n pay_almuerzo = fields.Boolean(default=False, string='pagar almuerzo?')\r\n pay_cena = fields.Boolean(default=False, string='pagar cena?')\r\n pay_alojamiento = fields.Boolean(default=False,\r\n string='pagar alojamiento?')\r\n pay_dia_completo = fields.Boolean(default=False,\r\n string='pagar dia_completo?')\r\n pay_gasto_estra = fields.Boolean(default=False,\r\n string='pagar gastos Extraordinario?')\r\n pay_gasto_imp = fields.Boolean(default=False,\r\n string='pagar gastos impuesto?')\r\n\r\n desayuno = fields.Float(string='Monto Desayuno', readonly=True)\r\n almuerzo = fields.Float(string='Monto Almuerzo', readonly=True)\r\n cena = fields.Float(string='Monto Cena', readonly=True)\r\n alojamiento = fields.Float(string='Monto Alojamiento', readonly=True)\r\n dia_completo = fields.Float(string='Monto Dia Completo', readonly=True)\r\n gasto_estra = fields.Float(string='Monto Gastos Extras', readonly=True)\r\n gasto_imp = fields.Float(string='Monto Gastos de Impuestos', readonly=True)\r\n\r\n monto_total = fields.Float(string='Total')\r\n\r\n @api.onchange('employee_id')\r\n def _onchange_employee(self):\r\n\r\n if self.job_id:\r\n categorias = self.env['viatico.categoria'].search(\r\n [('escala_id', '=', self.registro_id.escala_id.id),\r\n ('zona_id', '=', self.registro_id.zonas_id.id)])\r\n\r\n categoria_id = None\r\n 
for categoria in categorias:\r\n for trabajo in categoria.puesto_trabajo:\r\n if trabajo.id == self.job_id.id:\r\n categoria_id = categoria.id\r\n break\r\n\r\n if categoria_id:\r\n concepto = self.env['viatico.conceptos'].search(\r\n [('categ_id', '=', categoria_id),\r\n ('tipo', '=', self.registro_id.tipo)])\r\n\r\n self.concepto_id = concepto.id\r\n\r\n @api.onchange('pay_desayuno', 'pay_almuerzo', 'pay_cena', 'pay_alojamiento',\r\n 'pay_gasto_estra', 'pay_gasto_imp', 'pay_dia_completo')\r\n def _onchange_pay(self):\r\n desde = self.str_to_datetime(self.registro_id.fecha_viaje_desde)\r\n hasta = self.str_to_datetime(self.registro_id.fecha_viaje_hasta)\r\n\r\n temp = hasta - desde\r\n\r\n dias = temp.days if temp.days >= 1 else 1\r\n\r\n if self.pay_desayuno:\r\n self.desayuno = self.concepto_id.desayuno * dias\r\n else:\r\n self.desayuno = 0\r\n\r\n if self.pay_almuerzo:\r\n self.almuerzo = self.concepto_id.almuerzo * dias\r\n else:\r\n self.almuerzo = 0\r\n\r\n if self.pay_cena:\r\n self.cena = self.concepto_id.cena * dias\r\n else:\r\n self.cena = 0\r\n\r\n if self.pay_alojamiento:\r\n self.alojamiento = self.concepto_id.alojamiento * dias\r\n else:\r\n self.alojamiento = 0\r\n\r\n if self.pay_gasto_estra:\r\n self.gasto_estra = self.concepto_id.gasto_estraordinario * dias\r\n else:\r\n self.gasto_estra = 0\r\n\r\n if self.pay_gasto_imp:\r\n self.gasto_imp = self.concepto_id.gasto_impuesto * dias\r\n else:\r\n self.gasto_imp = 0\r\n\r\n if self.pay_dia_completo:\r\n self.dia_completo = self.concepto_id.dia_completo * dias\r\n else:\r\n self.dia_completo = 0\r\n\r\n if not self.pay_desayuno:\r\n self.desayuno = 0\r\n if not self.pay_almuerzo:\r\n self.almuerzo = 0\r\n if not self.pay_cena:\r\n self.cena = 0\r\n if not self.pay_alojamiento:\r\n self.alojamiento = 0\r\n if not self.pay_dia_completo:\r\n self.dia_completo = 0\r\n if not self.pay_gasto_estra:\r\n self.gasto_estra = 0\r\n if not self.pay_gasto_imp:\r\n self.gasto_imp = 0\r\n\r\n @api.model\r\n def create(self, values):\r\n # Filling in the values that are READONLY\r\n # pdb.set_trace()\r\n registro = self.env['viatico.registro'].browse(values['registro_id'])\r\n desde = self.str_to_datetime(registro.fecha_viaje_desde)\r\n hasta = self.str_to_datetime(registro.fecha_viaje_hasta)\r\n\r\n temp = hasta - desde\r\n\r\n dias = temp.days if temp.days >= 1 else 1\r\n\r\n concepto_id = self.env['viatico.conceptos'].browse(\r\n values['concepto_id'])\r\n\r\n if values['pay_desayuno']:\r\n values['desayuno'] = concepto_id.desayuno * dias\r\n if values['pay_almuerzo']:\r\n values['almuerzo'] = concepto_id.almuerzo * dias\r\n if values['pay_cena']:\r\n values['cena'] = concepto_id.cena * dias\r\n if values['pay_alojamiento']:\r\n values['alojamiento'] = concepto_id.alojamiento * dias\r\n if values['pay_dia_completo']:\r\n values['dia_completo'] = concepto_id.dia_completo * dias\r\n if values['pay_gasto_estra']:\r\n values['gasto_estra'] = concepto_id.gasto_estraordinario * dias\r\n if values['pay_gasto_imp']:\r\n values['gasto_imp'] = concepto_id.gasto_impuesto * dias\r\n return super(RegistroDeViaticosLines, self).create(values)\r\n\r\n @api.multi\r\n def write(self, values, context=None):\r\n\r\n registro = self.env['viatico.registro'].browse(self.registro_id.id)\r\n desde = self.str_to_datetime(registro.fecha_viaje_desde)\r\n hasta = self.str_to_datetime(registro.fecha_viaje_hasta)\r\n\r\n temp = hasta - desde\r\n\r\n dias = temp.days if temp.days >= 1 else 1\r\n concepto_id = 
self.env['viatico.conceptos'].browse(\r\n self.concepto_id.id)\r\n\r\n if 'pay_desayuno' in values:\r\n if values['pay_desayuno']:\r\n values['desayuno'] = concepto_id.desayuno * dias\r\n else:\r\n values['desayuno'] = 0\r\n if 'pay_almuerzo' in values:\r\n if values['pay_almuerzo']:\r\n values['almuerzo'] = concepto_id.almuerzo * dias\r\n else:\r\n values['almuerzo'] = 0\r\n if 'pay_cena' in values:\r\n if values['pay_cena']:\r\n values['cena'] = concepto_id.cena * dias\r\n else:\r\n values['cena'] = 0\r\n if 'pay_alojamiento' in values:\r\n if values['pay_alojamiento']:\r\n values['alojamiento'] = concepto_id.alojamiento * dias\r\n else:\r\n values['alojamiento'] = 0\r\n if 'pay_dia_completo' in values:\r\n if values['pay_dia_completo']:\r\n values['dia_completo'] = concepto_id.dia_completo * dias\r\n else:\r\n values['dia_completo'] = 0\r\n if 'pay_gasto_estra' in values:\r\n if values['pay_gasto_estra']:\r\n values['gasto_estra'] = concepto_id.gasto_estraordinario * dias\r\n else:\r\n values['gasto_estra'] = 0\r\n if 'pay_gasto_imp' in values:\r\n if values['pay_gasto_imp']:\r\n values['gasto_imp'] = concepto_id.gasto_impuesto * dias\r\n else:\r\n values['gasto_imp'] = 0\r\n return super(RegistroDeViaticosLines, self).write(values)", "sub_path": "addons-obs/adr_viaticos/registro_viaticos_models.py", "file_name": "registro_viaticos_models.py", "file_ext": "py", "file_size_in_byte": 16095, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "openerp.models.Model", "line_number": 13, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 13, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "attribute"}, {"api_name": "openerp.fields.Char", "line_number": 29, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 29, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 31, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 31, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 33, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 33, "usage_type": "name"}, {"api_name": "openerp.fields.Datetime", "line_number": 35, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 35, "usage_type": "name"}, {"api_name": "openerp.fields.Datetime", "line_number": 36, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 36, "usage_type": "name"}, {"api_name": "openerp.fields.Datetime", "line_number": 37, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 37, "usage_type": "name"}, {"api_name": "openerp.fields.Selection", "line_number": 39, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 39, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 41, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 41, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 42, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 42, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 45, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 45, "usage_type": "name"}, {"api_name": 
"openerp.fields.Boolean", "line_number": 46, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 46, "usage_type": "name"}, {"api_name": "openerp.fields.One2many", "line_number": 48, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 48, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 51, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 51, "usage_type": "name"}, {"api_name": "openerp.fields.Selection", "line_number": 53, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 53, "usage_type": "name"}, {"api_name": "openerp.fields.Selection", "line_number": 61, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 61, "usage_type": "name"}, {"api_name": "openerp.fields.Text", "line_number": 68, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 68, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 70, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 70, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 72, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 72, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 74, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 74, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 76, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 76, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 78, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 78, "usage_type": "name"}, {"api_name": "openerp.api.onchange", "line_number": 81, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 81, "usage_type": "name"}, {"api_name": "openerp.api.onchange", "line_number": 91, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 91, "usage_type": "name"}, {"api_name": "openerp.api.onchange", "line_number": 100, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 100, "usage_type": "name"}, {"api_name": "openerp.api.onchange", "line_number": 107, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 107, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 115, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 115, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 120, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 120, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 163, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 163, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 167, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 167, "usage_type": "name"}, {"api_name": "openerp.api.model", "line_number": 172, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 172, "usage_type": "name"}, {"api_name": "openerp.models.Model", "line_number": 181, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 181, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 196, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 196, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 197, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 204, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 204, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 205, "usage_type": "call"}, {"api_name": "openerp.fields.Many2one", "line_number": 209, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 209, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 211, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 211, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 213, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 213, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 216, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 216, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 222, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 222, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 223, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 223, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 224, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 224, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 225, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 225, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 227, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 227, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 229, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 229, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 231, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 231, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 234, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 234, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 235, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 235, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 236, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 236, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 237, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 237, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 238, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 238, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 239, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 239, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 240, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 240, "usage_type": "name"}, {"api_name": "openerp.fields.Float", "line_number": 242, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 242, "usage_type": "name"}, {"api_name": "openerp.api.onchange", "line_number": 244, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 244, "usage_type": "name"}, {"api_name": "openerp.api.onchange", "line_number": 266, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 266, "usage_type": "name"}, {"api_name": "openerp.api.model", "line_number": 326, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 326, "usage_type": "name"}, 
{"api_name": "openerp.api.multi", "line_number": 357, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 357, "usage_type": "name"}]} +{"seq_id": "510332635", "text": "import numpy as np\r\nimport functools as ft\r\n\r\n\r\ndef f1(x, y):\r\n return x + y\r\n\r\n\r\ndef f2(x, y):\r\n return x * y\r\n\r\n\r\na, b = 1, 2\r\nc = f1(a, b)\r\nprint(c)\r\n\r\nd = [1, 2, 3, 4, 5, 6]\r\ne = 0\r\nfor x in d:\r\n e += x\r\nprint(e)\r\n\r\nx = 0\r\nfor i in range(-1, -len(d) - 1, -1):\r\n y = d[i]\r\n x = f1(x, y)\r\nprint(x)\r\n\r\n#f = ft.reduce(f1, d)\r\nf = ft.reduce(lambda x, y: x + y, d)\r\nprint(f)\r\n#\r\n# 5,6->f1->11,4->f1->15,3->f1->18,2->f1->20,1->f1->21\r\n#\r\n#g = ft.reduce(f2, range(1, 6))\r\ng = ft.reduce(lambda x, y: x * y, range(1, 6))\r\nprint(g)\r\n\r\nX = np.array([1, 2, 3, 4, 5])\r\nY = np.array([10, 20, 30, 40, 50])\r\nZ = np.frompyfunc(f1, 2, 1)(X, Y)\r\nprint(Z)\r\n", "sub_path": "AID/reduce.py", "file_name": "reduce.py", "file_ext": "py", "file_size_in_byte": 655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "functools.reduce", "line_number": 30, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.frompyfunc", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "462962081", "text": "# from core import mechanismStackStorage as mss\nfrom core import mechanismStack as ms\nfrom core import mechanism as mech\nfrom core import algorithms as algs\nfrom itertools import permutations as perm_generator\nfrom copy import deepcopy\nimport random, traceback, math, operator, sys\n\nclass InputGenerator:\n @classmethod\n def generate_inputs(cls,\n inputs_min_length,\n inputs_max_length,\n avg_num_symbols,\n num_generated_inputs,\n run_chance=None,\n ascii_char_min=33,\n ascii_char_max=127):\n\n generated_inputs = list()\n for _ in range(num_generated_inputs):\n input_ = cls.generate_input(inputs_min_length,\n inputs_max_length,\n avg_num_symbols,\n run_chance,\n ascii_char_min,\n ascii_char_max)\n generated_inputs.append(input_)\n return generated_inputs\n\n @classmethod\n def generate_input(cls, i_min, i_max, average, run_chance, a_min, a_max):\n input_length = cls.input_length(i_min, i_max)\n num_symbols = cls.num_symbols(average, a_min, a_max)\n chosen_chars = cls.chosen_chars(a_min, a_max, num_symbols)\n run_chance = cls.get_run_chance( run_chance )\n curr_symbol = cls.get_random_symbol( chosen_chars, num_symbols )\n\n output_string = str()\n\n for sym in range( input_length ):\n output_string += curr_symbol\n if ( random.random() < run_chance ):\n continue\n else:\n curr_symbol = cls.get_random_symbol( chosen_chars, num_symbols )\n\n return mech.str_to_bytes( output_string )\n\n @classmethod\n def get_run_chance(cls, run_chance):\n if run_chance == None:\n return random.random()\n else:\n return run_chance\n\n @classmethod\n def get_random_symbol(cls, chosen_chars, num_symbols):\n return chosen_chars[ random.randint(0, num_symbols-1) ]\n\n @classmethod\n def input_length(cls, min, max):\n return random.randint( min, max )\n\n @classmethod\n def num_symbols(cls, average, char_min, char_max):\n char_range = char_max - char_min\n lower = math.ceil(char_range/2)\n upper = char_range*2\n multiplier = random.randint( lower, upper ) / char_range\n num_symbols = multiplier*average\n return 
math.ceil(num_symbols)\n\n @classmethod\n def chosen_chars(cls, min, max, num_symbols):\n ords = random.sample( range(min, max), num_symbols )\n return list( map( chr, ords ) )\n\nclass PermutationGenerator:\n @classmethod\n def create_permutations_with_dup_mechs(cls, mechanism_list):\n perms_with_dups = list()\n for index, mech in enumerate(mechanism_list):\n mechanism_list_copy = deepcopy(mechanism_list)\n mechanism_list_copy.insert(index, mech)\n for perm in cls.create_permutations( mechanism_list_copy ):\n perms_with_dups.append(perm)\n return perms_with_dups\n\n @classmethod\n def create_permutations(cls, mechanism_list):\n generated_permutations = perm_generator(mechanism_list)\n permutations = list()\n for perm in generated_permutations:\n if 'HUF' in perm:\n permutations.append(cls.sanitize_permutation( list(perm) ))\n else:\n permutations.append(list(perm))\n return permutations\n\n @classmethod\n def sanitize_permutation(cls, permutation):\n huffman_indexes = [i for i,v in enumerate(permutation) if v == 'HUF']\n for i, huf_index in enumerate( huffman_indexes ):\n permutation.insert( huf_index+i+1, 'EST' )\n return permutation\n\nclass TestStorage:\n def __init__(self, permutations, debug=False):\n self.permutations = permutations\n self.debug = debug\n self.tests = list()\n self.num_tests = len(self.permutations)\n self.pass_count = 0\n self.fail_count = 0\n self.fail_dict = dict()\n self.ratio_dict = dict()\n # self.non_compress_dict = dict()\n # self.ascii_char_min = 33\n # self.ascii_char_max = 127\n\n def populate_tests(self):\n # print(self.debug)\n for p in self.permutations:\n self.tests.append( Test( p, self.debug ) )\n\n def print_test_and_result(self, num, num_tests, test, input_, result):\n print()\n\n if result[2] not in self.ratio_dict:\n self.ratio_dict[result[2]] = 1\n else:\n self.ratio_dict[result[2]] += 1\n\n if result[0] == True:\n self.pass_count += 1\n else:\n self.fail_count +=1\n # self.fail_dict[test.mechanisms] = input_\n\n # # if result[1] == False:\n # if test.mechanisms not in self.non_compress_dict:\n # self.non_compress_dict[test.mechanisms] = 0\n\n # if result[1] == False:\n # self.non_compress_dict[test.mechanisms] += 1\n\n print(\"Test \" + str( num ) + \"/\" + str(num_tests) + \"... 
\", end='' )\n print(test.mechanisms, end=''),\n # print(input_)\n # print(len(result))\n print(\"Success:\", result[0], \"/ Compressed:\", result[1], \"/ Ratio:\", result[2])\n\n\n def execute_tests(self, inputs):\n num_tests = len(self.permutations) * len(inputs)\n test_counter = 1\n\n for rand_input in inputs:\n for test in self.tests:\n result = test.process_input(rand_input)\n test_counter += 1\n self.print_test_and_result(test_counter, num_tests, test, rand_input, result)\n\n result = test.process_input( rand_input )\n # print(\"Success:\", result[0], \"/ Compressed:\", result[1])\n\n print(\"Pass:\", self.pass_count)\n print(\"Fail:\", self.fail_count)\n # print(self.fail_dict)\n # print(self.non_compress_dict)\n # for i in list(reversed(sorted(self.non_compress_dict.items(), key=operator.itemgetter(1)))):\n # print(i)\n\n # for i in list(reversed(sorted(self.ratio_dict.items(), key=operator.itemgetter(0)))):\n # print(i)\n\n def execute_single_test(self, input_, lorem=False):\n for test in self.tests:\n result = test.process_input(input_, lorem)\n print(result)\n\nclass Test:\n\n def __str__(self):\n # return self.__class__.__name__\n return str([m for m in self.mech_stack.compression_stack] ) + \" \"\n\n def __init__(self, mechanisms, debug=False):\n self.mechanisms = mechanisms\n self.debug = debug\n self.mech_stack = ms.MechanismStack(mechanisms, 0, self.debug)\n\n\n def process_input(self, input_, lorem=False):\n try:\n input_length = len( input_ )\n encoded = self.mech_stack.encode( input_ )\n if lorem:\n file = open(\"data/loremX\", \"wb\")\n file.write(encoded)\n encoded_length = len( encoded )\n decoded = self.mech_stack.decode( encoded )\n except Exception:\n if self.debug:\n\n traceback.print_exc()\n return (False, False, -1)\n\n return( decoded == input_,\n encoded_length <= input_length,\n (input_length/encoded_length))\n\n\nif __name__ == '__main__':\n\n print(\"Import Success\")\n\n inputs = InputGenerator.generate_inputs(50, 1000, 20, 20, 0.2)\n perms = PermutationGenerator.create_permutations(['BWT', 'HUF', 'MTF', 'RLE'])\n test_storage = TestStorage(perms)\n test_storage.populate_tests()\n test_storage.execute_tests(inputs)\n # print(perms)\n", "sub_path": "tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 7858, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "random.random", "line_number": 43, "usage_type": "call"}, {"api_name": "core.mechanism.str_to_bytes", "line_number": 48, "usage_type": "call"}, {"api_name": "core.mechanism", "line_number": 48, "usage_type": "name"}, {"api_name": "random.random", "line_number": 53, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 59, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 63, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 68, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 70, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 72, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 76, "usage_type": "call"}, {"api_name": "core.mechanism", "line_number": 83, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 84, "usage_type": "call"}, {"api_name": "core.mechanism", "line_number": 85, "usage_type": "argument"}, {"api_name": "itertools.permutations", "line_number": 92, "usage_type": "call"}, {"api_name": "core.mechanismStack.MechanismStack", "line_number": 192, "usage_type": "call"}, {"api_name": 
"core.mechanismStack", "line_number": 192, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 207, "usage_type": "call"}]} +{"seq_id": "152585156", "text": "#from pyramid.security import (\n# Allow,\n# Authenticated,\n# Deny,\n# Everyone,\n#)\nfrom .download import ItemWithDocument\nfrom ..contentbase import (\n Collection,\n location\n)\nfrom ..schema_utils import (\n load_schema,\n)\nfrom collections import OrderedDict\n\n\n@location('labs')\nclass Lab(Collection):\n item_type = 'lab'\n schema = load_schema('lab.json')\n properties = {\n 'title': 'Labs',\n 'description': 'Listing of ENCODE DCC labs',\n }\n item_links = {\n 'awards': [\n {'$value': '/awards/{award_uuid}', '$templated': True,\n '$repeat': 'award_uuid award_uuids'}\n ]\n }\n\n\n@location('awards')\nclass Award(Collection):\n item_type = 'award'\n schema = load_schema('award.json')\n properties = {\n 'title': 'Awards (Grants)',\n 'description': 'Listing of awards (aka grants)',\n }\n #item_keys = ['name'] should this be unique\n\n\n@location('antibody-lots')\nclass AntibodyLots(Collection):\n item_type = 'antibody_lot'\n #schema = load_schema('antibody_lot.json')\n properties = {\n 'title': 'Antibodies Registry',\n 'description': 'Listing of ENCODE antibodies',\n }\n item_links = {\n 'source': {'$value': '/sources/{source_uuid}', '$templated': True},\n }\n item_embedded = set(['source'])\n item_keys = [\n {'name': 'accession', 'value': '{antibody_accession}', '$templated': True},\n {'name': '{item_type}:source_product_lot', 'value': '{source_uuid}/{product_id}/{lot_id}', '$templated': True},\n ]\n\n\n@location('organisms')\nclass Organism(Collection):\n item_type = 'organism'\n schema = load_schema('organism.json')\n properties = {\n 'title': 'Organisms',\n 'description': 'Listing of all registered organisms',\n 'description': 'Listing of sources and vendors for ENCODE material',\n }\n\n\n@location('sources')\nclass Source(Collection):\n item_type = 'source'\n schema = load_schema('source.json')\n properties = {\n 'title': 'Sources',\n 'description': 'Listing of sources and vendors for ENCODE material',\n }\n item_links = {\n 'actions': [\n {'name': 'edit', 'title': 'Edit', 'profile': '/profiles/{item_type}.json', 'method': 'POST', 'href': '', '$templated': True, '$condition': 'permission:edit'},\n ],\n }\n\n\n@location('donors')\nclass Donor(Collection):\n item_type = 'donor'\n ## schema = load_schema('donor.json') Doesn't exist yet\n properties = {\n 'title': 'Donors',\n 'description': 'Listing Biosample Donors',\n }\n item_links = {\n 'organism': {'$value': '/organisms/{organism_uuid}', '$templated': True},\n }\n item_embedded = set(['organism'])\n item_keys = ['donor_id']\n\n\n@location('treatments')\nclass Treatment(Collection):\n item_type = 'biosample_treatment'\n ## schema = load_schema('treatment.json') Doesn't exist yet\n properties = {\n 'title': 'Treatments',\n 'description': 'Listing Biosample Treatments',\n }\n item_keys = ['treatment_name']\n\n\n@location('constructs')\nclass Construct(Collection):\n item_type = 'biosample_construct'\n properties = {\n 'title': 'Constructs',\n 'description': 'Listing of Biosample Constructs',\n }\n item_links = {\n 'source': {'$value': '/sources/{source_uuid}', '$templated': True},\n 'documents': [\n {'$value': '/documents/{document_uuid}', '$templated': True, '$repeat': 'document_uuid document_uuids'},\n ],\n }\n item_embedded = set(['source', 'documents'])\n item_keys = ['vector_name']\n\n\n@location('documents')\nclass Document(Collection):\n 
item_type = 'biosample_document'\n properties = {\n 'title': 'Documents',\n 'description': 'Listing of Biosample Documents',\n }\n\n class Item(ItemWithDocument):\n keys = ['document_name']\n links = {\n 'submitter': {'$value': '/users/{submitter_uuid}', '$templated': True},\n 'lab': {'$value': '/labs/{lab_uuid}', '$templated': True},\n 'award': {'$value': '/awards/{award_uuid}', '$templated': True},\n }\n embedded = set(['submitter', 'lab', 'award'])\n\n\n@location('biosamples')\nclass Biosample(Collection):\n item_type = 'biosample'\n #schema = load_schema('biosample.json')\n properties = {\n 'title': 'Biosamples',\n 'description': 'Biosamples used in the ENCODE project',\n }\n item_links = {\n 'submitter': {'$value': '/users/{submitter_uuid}', '$templated': True},\n 'source': {'$value': '/sources/{source_uuid}', '$templated': True},\n 'lab': {'$value': '/labs/{lab_uuid}', '$templated': True},\n 'award': {'$value': '/awards/{award_uuid}', '$templated': True},\n 'donor': {'$value': '/donors/{donor_uuid}', '$templated': True},\n 'documents': [\n {'$value': '/documents/{document_uuid}', '$templated': True, '$repeat': 'document_uuid document_uuids'},\n ],\n 'treatments': [\n {'$value': '/treatments/{treatment_uuid}', '$templated': True, '$repeat': 'treatment_uuid treatment_uuids'},\n ],\n 'constructs': [\n {'$value': '/constructs/{construct_uuid}', '$templated': True, '$repeat': 'construct_uuid construct_uuids'},\n ],\n }\n item_embedded = set(['donor', 'submitter', 'lab', 'award', 'source', 'treatments', 'constructs', 'documents'])\n item_keys = [{'name': 'accession', 'value': '{accession}', '$templated': True}]\n columns = OrderedDict([\n ('accession', 'Accession'),\n ('biosample_term_name', 'Term'),\n ('biosample_type', 'Type'),\n ('donor.organism.organism_name', 'Species'),\n ('source.alias', 'Source'),\n ('lab.name', 'Submitter'),\n ('treatments.length', 'Treatments'),\n ('constructs.length', 'Constructs')\n ])\n\n\n@location('targets')\nclass Target(Collection):\n item_type = 'target'\n #schema = load_schema('target.json')\n properties = {\n 'title': 'Targets',\n 'description': 'Listing of ENCODE3 targets',\n }\n item_links = {\n 'organism': {'$value': '/organisms/{organism_uuid}', '$templated': True},\n 'submitter': {'$value': '/users/{submitter_uuid}', '$templated': True},\n 'lab': {'$value': '/labs/{lab_uuid}', '$templated': True},\n 'award': {'$value': '/awards/{award_uuid}', '$templated': True},\n }\n item_embedded = set(['organism', 'submitter', 'lab', 'award'])\n # item_keys = [('target_label', 'organism_name')] multi columns not implemented yet\n columns = OrderedDict([\n ('target_label', 'Target'),\n ('organism.organism_name', 'Species'),\n ('dbxref.uniprot', 'External Resources'),\n ('project', 'Project')\n ])\n\n\n# The following should really be child collections.\n@location('validations')\nclass AntibodyValidation(Collection):\n item_type = 'antibody_validation'\n #schema = load_schema('validation.json')\n properties = {\n 'title': 'Antibody Validations',\n 'description': 'Listing of antibody validation documents',\n }\n\n class Item(ItemWithDocument):\n links = {\n 'antibody_lot': {'$value': '/antibody-lots/{antibody_lot_uuid}', '$templated': True},\n 'target': {'$value': '/targets/{target_uuid}', '$templated': True},\n 'submitter': {'$value': '/users/{submitter_uuid}', '$templated': True},\n 'lab': {'$value': '/labs/{lab_uuid}', '$templated': True},\n 'award': {'$value': '/awards/{award_uuid}', '$templated': True},\n }\n embedded = set(['antibody_lot', 'target', 
'submitter', 'lab', 'award'])\n\n\n@location('antibodies')\nclass AntibodyApproval(Collection):\n #schema = load_schema('antibody_approval.json')\n item_type = 'antibody_approval'\n properties = {\n 'title': 'Antibody Approvals',\n 'description': 'Listing of validation approvals for ENCODE antibodies',\n }\n item_links = {\n 'antibody_lot': {'$value': '/antibody-lots/{antibody_lot_uuid}', '$templated': True},\n 'target': {'$value': '/targets/{target_uuid}', '$templated': True},\n 'validations': [\n {'$value': '/validations/{validation_uuid}', '$templated': True, '$repeat': 'validation_uuid validation_uuids'},\n ],\n }\n item_embedded = set(['antibody_lot', 'target', 'validations'])\n item_keys = [\n {'name': '{item_type}:lot_target', 'value': '{antibody_lot_uuid}/{target_uuid}', '$templated': True}\n ]\n item_rels = [\n {'rel': 'antibody_lot', 'target': '{antibody_lot_uuid}', '$templated': True},\n {'rel': 'target', 'target': '{target_uuid}', '$templated': True},\n {'rel': 'validation', 'target': '{validation_uuid}', '$templated': True, '$repeat': 'validation_uuid validation_uuids'},\n ]\n columns = OrderedDict([\n ('antibody_lot.antibody_accession', 'Accession'),\n ('target.target_label', 'Target'),\n ('target.organism.organism_name', 'Species'),\n ('antibody_lot.source.source_name', 'Source'),\n ('antibody_lot.product_id', 'Product ID'),\n ('antibody_lot.lot_id', 'Lot ID'),\n ('validations.length', 'Validations'),\n ('approval_status', 'Status')\n ])\n\n\n@location('platforms')\nclass Platform(Collection):\n item_type = 'platform'\n properties = {\n 'title': 'Platforms',\n 'description': 'Listing of Platforms',\n }\n\n\n@location('libraries')\nclass Library(Collection):\n item_type = 'library'\n properties = {\n 'title': 'Libraries',\n 'description': 'Listing of Libraries',\n }\n item_links = {\n 'biosample': {'$value': '/biosamples/{biosample_uuid}', '$templated': True},\n 'documents': [\n {'$value': '/documents/{document_uuid}', '$templated': True, '$repeat': 'document_uuid document_uuids'},\n ],\n }\n item_embedded = set(['biosample', 'documents'])\n item_keys = [{'name': 'accession', 'value': '{accession}', '$templated': True}]\n\n\n@location('assays')\nclass Assays(Collection):\n item_type = 'assay'\n properties = {\n 'title': 'Assays',\n 'description': 'Listing of Assays',\n }\n\n\n@location('replicates')\nclass Replicates(Collection):\n item_type = 'replicate'\n properties = {\n 'title': 'Replicates',\n 'description': 'Listing of Replicates',\n }\n item_links = {\n 'library': {'$value': '/libraries/{library_uuid}', '$templated': True},\n 'platform': {'$value': '/platforms/{platform_uuid}', '$templated': True},\n 'assay': {'$value': '/assays/{assay_uuid}', '$templated': True},\n }\n item_embedded = set(['library', 'platform', 'assay'])\n\n\n@location('files')\nclass Files(Collection):\n item_type = 'file'\n properties = {\n 'title': 'Files',\n 'description': 'Listing of Files',\n }\n item_links = {\n 'submitter': {'$value': '/users/{submitter_uuid}', '$templated': True},\n 'lab': {'$value': '/labs/{lab_uuid}', '$templated': True},\n 'award': {'$value': '/awards/{award_uuid}', '$templated': True},\n }\n item_embedded = set(['submitter', 'lab', 'award'])\n\n\n@location('experiments')\nclass Experiments(Collection):\n item_type = 'experiment'\n properties = {\n 'title': 'Experiments',\n 'description': 'Listing of Experiments',\n }\n item_links = {\n 'submitter': {'$value': '/users/{submitter_uuid}', '$templated': True},\n 'lab': {'$value': '/labs/{lab_uuid}', '$templated': True},\n 
'award': {'$value': '/awards/{award_uuid}', '$templated': True},\n 'files': [\n {'$value': '/files/{file_uuid}', '$templated': True, '$repeat': 'file_uuid file_uuids'},\n ],\n 'replicates': [\n {'$value': '/replicates/{replicate_uuid}', '$templated': True, '$repeat': 'replicate_uuid replicate_uuids'},\n ],\n 'controls': [\n {'$value': '/experiments/{experiment_control_uuid}', '$templated': True, '$repeat': 'experiment_control_uuid experiment_control_uuids'},\n ],\n }\n item_embedded = set(['files', 'replicates', 'submitter', 'lab', 'award', 'controls'])\n item_keys = [{'name': 'accession', 'value': '{dataset_accession}', '$templated': True}]\n columns = OrderedDict([\n ('dataset_accession', 'Accession'),\n ('replicates.0.assay.assay_name', 'Assay Type'),\n ('replicates.0.target', 'Target'),\n ('replicates.0.library.biosample.biosample_term_name', 'Biosample'),\n ('replicates.length', 'Biological Replicates'),\n ('files.length', 'Files'),\n ('lab.name', 'Lab'),\n ('project', 'Project')\n ])\n", "sub_path": "src/encoded/views/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 12608, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "contentbase.Collection", "line_number": 19, "usage_type": "name"}, {"api_name": "schema_utils.load_schema", "line_number": 21, "usage_type": "call"}, {"api_name": "contentbase.location", "line_number": 18, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 35, "usage_type": "name"}, {"api_name": "schema_utils.load_schema", "line_number": 37, "usage_type": "call"}, {"api_name": "contentbase.location", "line_number": 34, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 46, "usage_type": "name"}, {"api_name": "contentbase.location", "line_number": 45, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 64, "usage_type": "name"}, {"api_name": "schema_utils.load_schema", "line_number": 66, "usage_type": "call"}, {"api_name": "contentbase.location", "line_number": 63, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 75, "usage_type": "name"}, {"api_name": "schema_utils.load_schema", "line_number": 77, "usage_type": "call"}, {"api_name": "contentbase.location", "line_number": 74, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 90, "usage_type": "name"}, {"api_name": "contentbase.location", "line_number": 89, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 105, "usage_type": "name"}, {"api_name": "contentbase.location", "line_number": 104, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 116, "usage_type": "name"}, {"api_name": "contentbase.location", "line_number": 115, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 133, "usage_type": "name"}, {"api_name": "download.ItemWithDocument", "line_number": 140, "usage_type": "name"}, {"api_name": "contentbase.location", "line_number": 132, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 151, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 176, "usage_type": "call"}, {"api_name": "contentbase.location", "line_number": 150, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 189, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 204, "usage_type": "call"}, {"api_name": "contentbase.location", 
"line_number": 188, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 214, "usage_type": "name"}, {"api_name": "download.ItemWithDocument", "line_number": 222, "usage_type": "name"}, {"api_name": "contentbase.location", "line_number": 213, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 234, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 257, "usage_type": "call"}, {"api_name": "contentbase.location", "line_number": 233, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 270, "usage_type": "name"}, {"api_name": "contentbase.location", "line_number": 269, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 279, "usage_type": "name"}, {"api_name": "contentbase.location", "line_number": 278, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 296, "usage_type": "name"}, {"api_name": "contentbase.location", "line_number": 295, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 305, "usage_type": "name"}, {"api_name": "contentbase.location", "line_number": 304, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 320, "usage_type": "name"}, {"api_name": "contentbase.location", "line_number": 319, "usage_type": "call"}, {"api_name": "contentbase.Collection", "line_number": 335, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 357, "usage_type": "call"}, {"api_name": "contentbase.location", "line_number": 334, "usage_type": "call"}]} +{"seq_id": "61612126", "text": "from django.db import models\n\n\nclass Camera(models.Model):\n CAMERA_TYPES=(\n ('M', 'Camera Module'),\n ('N', 'Network'),\n ('U', 'USB'),\n )\n type = models.CharField(choices=CAMERA_TYPES, max_length=1)\n\n def __unicode__(self):\n return u'%s - %s' % (self.type, self.id)\n\n\nclass Sensor(models.Model):\n SENSOR_TYPES=(\n ('H', 'Humidity'),\n ('L', 'Light'),\n ('P', 'PH'),\n ('S', 'Soil'),\n ('T', 'Temperature'),\n ('E', 'Temperature & Humidity'),\n )\n type = models.CharField(choices=SENSOR_TYPES, max_length=1)\n name = models.CharField(max_length=200, blank=True, null=True)\n description = models.TextField(blank=True, null=True)\n pin_in = models.IntegerField(default=0)\n pin_out = models.IntegerField(default=0)\n range_low = models.IntegerField(default=0)\n range_high = models.IntegerField(default=1)\n trigger = models.IntegerField(default=0)\n\n def __unicode__(self):\n return u'%s' % (self.name)\n\n def save(self, *args, **kwargs):\n super(Sensor, self).save(*args, **kwargs) \n if not self.name:\n self.name = 'sensor' + str(self.id)\n self.save()\n\nclass Zone(models.Model):\n name = models.CharField(max_length=200, blank=True, null=True)\n description = models.TextField(blank=True, null=True)\n sensors = models.ManyToManyField(Sensor, blank=True) \n cameras = models.ManyToManyField(Camera, blank=True) \n\n def __unicode__(self):\n return u'%s' % (self.name)\n\n def save(self, *args, **kwargs):\n if not self.name:\n self.name = 'zone' + str(self.id)\n super(Zone, self).save(*args, **kwargs) \n", "sub_path": "growberry/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1558, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.db.models.Model", "line_number": 4, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 4, "usage_type": "name"}, {"api_name": 
"django.db.models.CharField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "12664672", "text": "# -------------------------------------------------------------------------\n#\n# Part of the CodeChecker project, under the Apache License v2.0 with\n# LLVM Exceptions. See LICENSE for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n#\n# -------------------------------------------------------------------------\n\"\"\"\nClang Static Analyzer related functions.\n\"\"\"\n\n\nimport os\nimport re\nimport shlex\nimport subprocess\n\nfrom typing import Dict, List\n\nfrom codechecker_common.logger import get_logger\n\nfrom codechecker_analyzer import env\n\nfrom .. import analyzer_base\nfrom ..config_handler import CheckerState\nfrom ..flag import has_flag\nfrom ..flag import prepend_all\n\nfrom . import clang_options\nfrom . import config_handler\nfrom . import ctu_triple_arch\nfrom . 
import version\nfrom .result_handler import ResultHandlerClangSA\n\nLOG = get_logger('analyzer')\n\n\ndef parse_clang_help_page(\n command: List[str],\n start_label: str,\n environ: Dict[str, str]\n) -> List[str]:\n \"\"\"\n Parse the clang help page starting from a specific label.\n Returns a list of (flag, description) tuples.\n \"\"\"\n try:\n help_page = subprocess.check_output(\n command,\n stderr=subprocess.STDOUT,\n env=environ,\n universal_newlines=True,\n encoding=\"utf-8\",\n errors=\"ignore\")\n except (subprocess.CalledProcessError, OSError):\n LOG.debug(\"Failed to run '%s' command!\", command)\n return []\n\n try:\n help_page = help_page[help_page.index(start_label) + len(start_label):]\n except ValueError:\n return []\n\n # This regex will match lines which contain only a flag or a flag and a\n # description: ' ', ' '.\n start_new_option_rgx = \\\n re.compile(r\"^\\s{2}(?P\\S+)(\\s(?P[^\\n]+))?$\")\n\n # This regex will match lines which contain description for the previous\n # flag: ' '\n prev_help_desc_rgx = \\\n re.compile(r\"^\\s{3,}(?P[^\\n]+)$\")\n\n res = []\n\n flag = None\n desc = []\n for line in help_page.splitlines():\n m = start_new_option_rgx.match(line)\n if m:\n if flag and desc:\n res.append((flag, ' '.join(desc)))\n flag = None\n desc = []\n\n flag = m.group(\"flag\")\n else:\n m = prev_help_desc_rgx.match(line)\n\n if m and m.group(\"desc\"):\n desc.append(m.group(\"desc\").strip())\n\n if flag and desc:\n res.append((flag, ' '.join(desc)))\n\n return res\n\n\nclass ClangSA(analyzer_base.SourceAnalyzer):\n \"\"\"\n Constructs clang static analyzer commands.\n \"\"\"\n ANALYZER_NAME = 'clangsa'\n\n def __init__(self, cfg_handler, buildaction):\n super(ClangSA, self).__init__(cfg_handler, buildaction)\n self.__disable_ctu = False\n self.__checker_configs = []\n\n def is_ctu_available(self):\n \"\"\"\n Check if ctu is available for the analyzer.\n If the ctu_dir is set in the config, the analyzer is capable to\n run ctu analysis.\n \"\"\"\n return bool(self.config_handler.ctu_dir)\n\n def is_ctu_enabled(self):\n \"\"\"\n Check if ctu is enabled for the analyzer.\n \"\"\"\n return not self.__disable_ctu\n\n def disable_ctu(self):\n \"\"\"\n Disable ctu even if ctu is available.\n By default it is enabled if available.\n \"\"\"\n self.__disable_ctu = True\n\n def enable_ctu(self):\n self.__disable_ctu = False\n\n def add_checker_config(self, checker_cfg):\n \"\"\"\n Add configuration options to specific checkers.\n checker_cfg should be a list of arguments in case of\n Clang Static Analyzer like this:\n ['-Xclang', '-analyzer-config', '-Xclang', 'checker_option=some_value']\n \"\"\"\n\n self.__checker_configs.append(checker_cfg)\n\n @classmethod\n def get_analyzer_checkers(\n cls,\n cfg_handler: config_handler.ClangSAConfigHandler,\n environ: Dict[str, str]\n ) -> List[str]:\n \"\"\"Return the list of the supported checkers.\"\"\"\n checker_list_args = clang_options.get_analyzer_checkers_cmd(\n cfg_handler,\n alpha=True)\n return parse_clang_help_page(checker_list_args, 'CHECKERS:', environ)\n\n @classmethod\n def get_checker_config(\n cls,\n cfg_handler: config_handler.ClangSAConfigHandler,\n environ: Dict[str, str]\n ) -> List[str]:\n \"\"\"Return the list of checker config options.\"\"\"\n checker_config_args = clang_options.get_checker_config_cmd(\n cfg_handler,\n alpha=True)\n return parse_clang_help_page(checker_config_args, 'OPTIONS:', environ)\n\n @classmethod\n def get_analyzer_config(\n cls,\n cfg_handler: config_handler.ClangSAConfigHandler,\n 
environ: Dict[str, str]\n ) -> List[str]:\n \"\"\"Return the list of analyzer config options.\"\"\"\n analyzer_config_args = clang_options.get_analyzer_config_cmd(\n cfg_handler)\n return parse_clang_help_page(analyzer_config_args, 'OPTIONS:', environ)\n\n def construct_analyzer_cmd(self, result_handler):\n \"\"\"\n Called by the analyzer method.\n Construct the analyzer command.\n \"\"\"\n try:\n # Get an output file from the result handler.\n analyzer_output_file = result_handler.analyzer_result_file\n\n # Get the checkers list from the config_handler.\n # Checker order matters.\n config = self.config_handler\n\n analyzer_cmd = [config.analyzer_binary, '--analyze',\n # Do not warn about the unused gcc/g++ arguments.\n '-Qunused-arguments']\n\n for plugin in config.analyzer_plugins:\n analyzer_cmd.extend([\"-Xclang\", \"-plugin\",\n \"-Xclang\", \"checkercfg\",\n \"-Xclang\", \"-load\",\n \"-Xclang\", plugin])\n\n analyzer_mode = 'plist-multi-file'\n analyzer_cmd.extend(['-Xclang',\n '-analyzer-opt-analyze-headers',\n '-Xclang',\n '-analyzer-output=' + analyzer_mode,\n '-o', analyzer_output_file])\n\n # Expand macros in plist output on the bug path.\n analyzer_cmd.extend(['-Xclang',\n '-analyzer-config',\n '-Xclang',\n 'expand-macros=true'])\n\n # Checker configuration arguments needs to be set before\n # the checkers.\n if self.__checker_configs:\n for cfg in self.__checker_configs:\n analyzer_cmd.extend(cfg)\n\n # TODO: This object has a __checker_configs attribute and the\n # corresponding functions to set it. Either those should be used\n # for checker configs coming as command line argument, or those\n # should be eliminated.\n for cfg in config.checker_config:\n analyzer_cmd.extend(\n ['-Xclang', '-analyzer-config', '-Xclang', cfg])\n\n # Config handler stores which checkers are enabled or disabled.\n for checker_name, value in config.checks().items():\n state, _ = value\n if state == CheckerState.enabled:\n analyzer_cmd.extend(['-Xclang',\n '-analyzer-checker=' + checker_name])\n elif state == CheckerState.disabled:\n analyzer_cmd.extend(['-Xclang',\n '-analyzer-disable-checker=' +\n checker_name])\n\n # Enable aggressive-binary-operation-simplification option.\n analyzer_cmd.extend(\n clang_options.get_abos_options(config.version_info))\n\n # Enable the z3 solver backend.\n if config.enable_z3:\n analyzer_cmd.extend(['-Xclang', '-analyzer-constraints=z3'])\n\n if config.enable_z3_refutation and not config.enable_z3:\n analyzer_cmd.extend(['-Xclang',\n '-analyzer-config',\n '-Xclang',\n 'crosscheck-with-z3=true'])\n\n if config.ctu_dir and not self.__disable_ctu:\n analyzer_cmd.extend(\n ['-Xclang', '-analyzer-config', '-Xclang',\n 'experimental-enable-naive-ctu-analysis=true',\n '-Xclang', '-analyzer-config', '-Xclang',\n 'ctu-dir=' + self.get_ctu_dir()])\n ctu_display_progress = config.ctu_capability.display_progress\n if ctu_display_progress:\n analyzer_cmd.extend(ctu_display_progress)\n\n if config.ctu_on_demand:\n invocation_list_path = \\\n os.path.join(self.get_ctu_dir(), 'invocation-list.yml')\n analyzer_cmd.extend(\n ['-Xclang', '-analyzer-config', '-Xclang',\n f'ctu-invocation-list={invocation_list_path}'\n ])\n\n compile_lang = self.buildaction.lang\n if not has_flag('-x', analyzer_cmd):\n analyzer_cmd.extend(['-x', compile_lang])\n\n if not has_flag('--target', analyzer_cmd) and \\\n self.buildaction.target.get(compile_lang, \"\") != \"\":\n analyzer_cmd.append(\"--target=\" +\n self.buildaction.target.get(compile_lang))\n\n if not has_flag('-arch', analyzer_cmd) and 
\\\n self.buildaction.arch != \"\":\n analyzer_cmd.extend([\"-arch \", self.buildaction.arch])\n\n if not has_flag('-std', analyzer_cmd) and \\\n self.buildaction.compiler_standard.get(compile_lang, \"\") \\\n != \"\":\n analyzer_cmd.append(\n self.buildaction.compiler_standard[compile_lang])\n\n analyzer_cmd.extend(config.analyzer_extra_arguments)\n\n analyzer_cmd.extend(self.buildaction.analyzer_options)\n\n analyzer_cmd.extend(prepend_all(\n '-isystem',\n self.buildaction.compiler_includes[compile_lang]))\n\n analyzer_cmd.append(self.source_file)\n\n return analyzer_cmd\n\n except Exception as ex:\n LOG.error(ex)\n return []\n\n def get_ctu_dir(self):\n \"\"\"\n Returns the path of the ctu directory (containing the triple).\n \"\"\"\n config = self.config_handler\n environ = env.extend(config.path_env_extra,\n config.ld_lib_path_extra)\n triple_arch = ctu_triple_arch.get_triple_arch(self.buildaction,\n self.source_file,\n config, environ)\n ctu_dir = os.path.join(config.ctu_dir, triple_arch)\n return ctu_dir\n\n def analyzer_mentioned_file_real_path(self, mentioned_path):\n \"\"\"\n PCH-based an On-demand-parsed CTU modes use different paths and file\n suffixes. PCH-based mode uses ast dump files that are suffixed with\n '.ast', and they are supposed to be under the\n '/ast/'. On-demand-parsed mode uses the\n full paths of the original source files.\n \"\"\"\n pch_suffix = '.ast'\n\n # We convert the given file path to absolute path because we suppose\n # that in the clang's output the PCH files in CTU mode are relative\n # paths.\n mentioned_path = os.path.join(self.get_ctu_dir(), mentioned_path)\n\n # Detect the mode based on the path.\n suffix_index = mentioned_path.rfind(pch_suffix)\n # If the file does not have the suffix, the mode is On-demand-parsed.\n # Return the original path.\n if suffix_index == -1:\n LOG.debug(\"Analyzer mentioned path path: '%s', \"\n \"corresponding source file: '%s'\",\n mentioned_path, mentioned_path)\n return mentioned_path\n\n # PCH-based mode stores files with their full path structure recreated\n # under /ast.\n ctu_ast_dir = os.path.join(self.get_ctu_dir(), 'ast')\n\n source_path = mentioned_path[len(ctu_ast_dir):suffix_index]\n\n LOG.debug(\"Analyzer mentioned path path: '%s', \"\n \"corresponding source file: '%s'\",\n mentioned_path, source_path)\n\n if not mentioned_path.startswith(ctu_ast_dir):\n LOG.error(\n \"Mentioned path '%s' ends with suffix '%s', but does \"\n \"not begin with supposed ast dir '%s'.\", mentioned_path,\n pch_suffix, ctu_ast_dir)\n\n # Strip the prefix ast directory and the suffix.\n return mentioned_path[len(ctu_ast_dir):suffix_index]\n\n def get_analyzer_mentioned_files(self, output):\n \"\"\"\n Parse ClangSA's output to generate a list of files that were mentioned\n in the standard output or standard error.\n \"\"\"\n if not output:\n return set()\n\n regex_for_ctu_ast_load = re.compile(\n r\"CTU loaded AST file: (.*)\")\n\n paths = set()\n\n for line in output.splitlines():\n match = re.match(regex_for_ctu_ast_load, line)\n if match:\n path = match.group(1)\n paths.add(self.analyzer_mentioned_file_real_path(path))\n\n return paths\n\n @classmethod\n def resolve_missing_binary(cls, configured_binary, environ):\n \"\"\"\n In case of the configured binary for the analyzer is not found in the\n PATH, this method is used to find a callable binary.\n \"\"\"\n\n LOG.debug(\"%s not found in path for ClangSA!\", configured_binary)\n\n if os.path.isabs(configured_binary):\n # Do not autoresolve if the path is an absolute path 
as there\n # is nothing we could auto-resolve that way.\n return False\n\n # clang, clang-5.0, clang++, clang++-5.1, ...\n clang = env.get_binary_in_path(['clang', 'clang++'],\n r'^clang(\\+\\+)?(-\\d+(\\.\\d+){0,2})?$',\n environ)\n\n if clang:\n LOG.debug(\"Using '%s' for ClangSA!\", clang)\n return clang\n\n def construct_result_handler(self, buildaction, report_output,\n checker_labels, skiplist_handler):\n \"\"\"\n See base class for docs.\n \"\"\"\n res_handler = ResultHandlerClangSA(buildaction, report_output,\n self.config_handler.report_hash)\n\n res_handler.checker_labels = checker_labels\n res_handler.skiplist_handler = skiplist_handler\n\n return res_handler\n\n @classmethod\n def construct_config_handler(cls, args, context):\n\n environ = env.extend(context.path_env_extra,\n context.ld_lib_path_extra)\n\n handler = config_handler.ClangSAConfigHandler(environ)\n handler.analyzer_plugins_dir = context.checker_plugin\n handler.analyzer_binary = context.analyzer_binaries.get(\n cls.ANALYZER_NAME)\n handler.version_info = version.get(handler.analyzer_binary, environ)\n\n handler.report_hash = args.report_hash \\\n if 'report_hash' in args else None\n\n handler.enable_z3 = 'enable_z3' in args and args.enable_z3 == 'on'\n\n handler.enable_z3_refutation = 'enable_z3_refutation' in args and \\\n args.enable_z3_refutation == 'on'\n\n if 'ctu_phases' in args:\n handler.ctu_dir = os.path.join(args.output_path,\n args.ctu_dir)\n handler.ctu_on_demand = \\\n 'ctu_ast_mode' in args and \\\n args.ctu_ast_mode == 'parse-on-demand'\n handler.log_file = args.logfile\n handler.path_env_extra = context.path_env_extra\n handler.ld_lib_path_extra = context.ld_lib_path_extra\n\n try:\n with open(args.clangsa_args_cfg_file, 'r', encoding='utf8',\n errors='ignore') as sa_cfg:\n handler.analyzer_extra_arguments = \\\n re.sub(r'\\$\\((.*?)\\)',\n env.replace_env_var(args.clangsa_args_cfg_file),\n sa_cfg.read().strip())\n handler.analyzer_extra_arguments = \\\n shlex.split(handler.analyzer_extra_arguments)\n except IOError as ioerr:\n LOG.debug_analyzer(ioerr)\n except AttributeError as aerr:\n # No clangsa arguments file was given in the command line.\n LOG.debug_analyzer(aerr)\n\n checkers = ClangSA.get_analyzer_checkers(handler, environ)\n\n try:\n cmdline_checkers = args.ordered_checkers\n except AttributeError:\n LOG.debug_analyzer('No checkers were defined in '\n 'the command line for %s', cls.ANALYZER_NAME)\n cmdline_checkers = []\n\n handler.initialize_checkers(\n context,\n checkers,\n cmdline_checkers,\n 'enable_all' in args and args.enable_all)\n\n handler.checker_config = []\n r = re.compile(r'(?P.+?):(?P.+?)=(?P.+)')\n\n # TODO: This extra \"isinstance\" check is needed for\n # CodeChecker checkers --checker-config. This command also runs\n # this function in order to construct a config handler.\n if 'checker_config' in args and isinstance(args.checker_config, list):\n for cfg in args.checker_config:\n m = re.search(r, cfg)\n if m.group('analyzer') == cls.ANALYZER_NAME:\n handler.checker_config.append(\n m.group('key') + '=' + m.group('value'))\n\n # TODO: This extra \"isinstance\" check is needed for\n # CodeChecker analyzers --analyzer-config. 
This command also runs\n # this function in order to construct a config handler.\n if 'analyzer_config' in args and \\\n isinstance(args.analyzer_config, list):\n for cfg in args.analyzer_config:\n m = re.search(r, cfg)\n if m.group('analyzer') == cls.ANALYZER_NAME:\n handler.checker_config.append(\n m.group('key') + '=' + m.group('value'))\n\n return handler\n", "sub_path": "analyzer/codechecker_analyzer/analyzers/clangsa/analyzer.py", "file_name": "analyzer.py", "file_ext": "py", "file_size_in_byte": 18852, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "codechecker_common.logger.get_logger", "line_number": 35, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 41, "usage_type": "name"}, {"api_name": "subprocess.check_output", "line_number": 48, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 50, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 55, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 67, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 72, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 42, "usage_type": "name"}, {"api_name": "config_handler.ClangSAConfigHandler", "line_number": 147, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 148, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 149, "usage_type": "name"}, {"api_name": "config_handler.ClangSAConfigHandler", "line_number": 159, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 160, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 161, "usage_type": "name"}, {"api_name": "config_handler.ClangSAConfigHandler", "line_number": 171, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 172, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 173, "usage_type": "name"}, {"api_name": "result_handler.analyzer_result_file", "line_number": 186, "usage_type": "attribute"}, {"api_name": "config_handler.CheckerState.enabled", "line_number": 232, "usage_type": "attribute"}, {"api_name": "config_handler.CheckerState", "line_number": 232, "usage_type": "name"}, {"api_name": "config_handler.CheckerState.disabled", "line_number": 235, "usage_type": "attribute"}, {"api_name": "config_handler.CheckerState", "line_number": 235, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 266, "usage_type": "call"}, {"api_name": "os.path", "line_number": 266, "usage_type": "attribute"}, {"api_name": "flag.has_flag", "line_number": 273, "usage_type": "call"}, {"api_name": "flag.has_flag", "line_number": 276, "usage_type": "call"}, {"api_name": "flag.has_flag", "line_number": 281, "usage_type": "call"}, {"api_name": "flag.has_flag", "line_number": 285, "usage_type": "call"}, {"api_name": "flag.prepend_all", "line_number": 295, "usage_type": "call"}, {"api_name": "codechecker_analyzer.env.extend", "line_number": 312, "usage_type": "call"}, {"api_name": "codechecker_analyzer.env", "line_number": 312, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 317, "usage_type": "call"}, {"api_name": "os.path", "line_number": 317, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 333, "usage_type": "call"}, {"api_name": "os.path", "line_number": 333, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 347, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 347, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 372, "usage_type": "call"}, {"api_name": "re.match", "line_number": 378, "usage_type": "call"}, {"api_name": "os.path.isabs", "line_number": 394, "usage_type": "call"}, {"api_name": "os.path", "line_number": 394, "usage_type": "attribute"}, {"api_name": "codechecker_analyzer.env.get_binary_in_path", "line_number": 400, "usage_type": "call"}, {"api_name": "codechecker_analyzer.env", "line_number": 400, "usage_type": "name"}, {"api_name": "result_handler.ResultHandlerClangSA", "line_number": 413, "usage_type": "call"}, {"api_name": "codechecker_analyzer.env.extend", "line_number": 424, "usage_type": "call"}, {"api_name": "codechecker_analyzer.env", "line_number": 424, "usage_type": "name"}, {"api_name": "config_handler.ClangSAConfigHandler", "line_number": 427, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 442, "usage_type": "call"}, {"api_name": "os.path", "line_number": 442, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 455, "usage_type": "call"}, {"api_name": "codechecker_analyzer.env.replace_env_var", "line_number": 456, "usage_type": "call"}, {"api_name": "codechecker_analyzer.env", "line_number": 456, "usage_type": "name"}, {"api_name": "shlex.split", "line_number": 459, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 482, "usage_type": "call"}, {"api_name": "re.search", "line_number": 489, "usage_type": "call"}, {"api_name": "re.search", "line_number": 500, "usage_type": "call"}]} +{"seq_id": "187850170", "text": "from django.contrib.auth import get_user_model\nfrom rest_framework import status, mixins\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.decorators import action\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet, GenericViewSet\n\nfrom users.models import Profile, Relation\nfrom users.serializers import UserSerializers, ProfileSerializer, RelationSerializer\n\nUser = get_user_model()\n\n\nclass UserViewSet(ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializers\n # permission_classes = (IsAuthenticated,)\n\n @action(detail=False, methods=['post'])\n def login(self, request):\n user = User.objects.get(username=request.data.get('username'))\n if user.check_password(request.data.get('password')):\n token, __ = Token.objects.get_or_create(user=user)\n data = {\n \"token\": token.key\n }\n return Response(data, status=status.HTTP_201_CREATED)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n @action(detail=False, methods=['delete'])\n def logout(self, request):\n user = request.user\n user.auth_token.delete()\n data = {\n \"logout!!!!!\"\n }\n return Response(data, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True)\n def follow(self, request, pk=None):\n # pk의 following\n user = get_object_or_404(User, id=pk)\n users = User.objects.filter(\n to_users_relation__from_user=user,\n to_users_relation__related_type='f'\n )\n serializer = UserSerializers(users, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @action(detail=True)\n def follower(self, request, pk=None):\n user = get_object_or_404(User, id=pk)\n users = User.objects.filter(\n from_users_relation__to_user=user,\n from_users_relation__related_type='f'\n )\n serializers = 
UserSerializers(users, many=True)\n return Response(serializers.data, status=status.HTTP_200_OK)\n\n\nclass ProfileViewSet(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, GenericViewSet):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n # permission_classes = (IsAuthenticated,)\n\n\nclass RelationViewSet(ModelViewSet):\n queryset = Relation.objects.all()\n serializer_class = RelationSerializer\n # permission_classes = (IsAuthenticated,)\n", "sub_path": "minastagram/users/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 13, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 16, "usage_type": "name"}, {"api_name": "users.serializers.UserSerializers", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.authtoken.models.Token.objects.get_or_create", "line_number": 25, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.models.Token.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rest_framework.authtoken.models.Token", "line_number": 25, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 29, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 29, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 29, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 30, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 30, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 39, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 39, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 39, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 32, "usage_type": "call"}, {"api_name": "rest_framework.generics.get_object_or_404", "line_number": 44, "usage_type": "call"}, {"api_name": "users.models", "line_number": 45, "usage_type": "name"}, {"api_name": "users.serializers.UserSerializers", "line_number": 49, "usage_type": "call"}, {"api_name": "users.models", "line_number": 49, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 50, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 50, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 50, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 41, "usage_type": "call"}, {"api_name": "rest_framework.generics.get_object_or_404", "line_number": 54, "usage_type": "call"}, {"api_name": "users.models", "line_number": 55, "usage_type": "name"}, {"api_name": "users.serializers.UserSerializers", "line_number": 59, "usage_type": "call"}, {"api_name": "users.models", "line_number": 59, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 60, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 60, "usage_type": "attribute"}, {"api_name": 
"rest_framework.status", "line_number": 60, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 52, "usage_type": "call"}, {"api_name": "rest_framework.mixins.RetrieveModelMixin", "line_number": 63, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 63, "usage_type": "name"}, {"api_name": "rest_framework.mixins.UpdateModelMixin", "line_number": 63, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 63, "usage_type": "name"}, {"api_name": "users.models.Profile.objects.all", "line_number": 64, "usage_type": "call"}, {"api_name": "users.models.Profile.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "users.models.Profile", "line_number": 64, "usage_type": "name"}, {"api_name": "users.serializers.ProfileSerializer", "line_number": 65, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 69, "usage_type": "name"}, {"api_name": "users.models.Relation.objects.all", "line_number": 70, "usage_type": "call"}, {"api_name": "users.models.Relation.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "users.models.Relation", "line_number": 70, "usage_type": "name"}, {"api_name": "users.serializers.RelationSerializer", "line_number": 71, "usage_type": "name"}]} +{"seq_id": "458398600", "text": "from scipy import misc\nfrom .ecc import *\nfrom .constants import *\nRED = 0\nGREEN = 1\nBLUE = 2\n\ndef color_sum_range(picture, col, x1, x2, y1, y2):\n \"\"\"\n Returns: sum of color intensities over all pixels in given range\n Takes:\n color - color to be added up (R, G, B)\n [x1, x2) x [y1, y2) - range to be modified\n \"\"\"\n return sum([picture[i][k][col] for i in range(x1, x2) for k in range(y1, y2)])\n\ndef maximal_message_length(width, height, block_size):\n \"\"\"\n Returns: maximal possible message lenght hidden in picture\n Takes:\n width: picture width\n height: picture height\n block_size: size of block\n \"\"\"\n return (width // block_size) * (height // block_size) * 2 // CODE_LENGTH\n\ndef possible_block_size(width, height, block_size):\n return maximal_message_length(width, height, block_size) > maximal_message_length(width, height, block_size + 1)\n\ndef parse_picture_to_averages(picture, block_size):\n \"\"\"\n Returns: list of average pixel intensities in blocks of given size as list of lists [R, G, B]\n Takes:\n block_size: size of blocks\n \"\"\"\n height = picture.shape[0]\n width = picture.shape[1]\n result = []\n for i in range(0, height - block_size + 1, block_size):\n for k in range(0, width - block_size + 1, block_size):\n result += [[color_sum_range(picture, c, i, i + block_size, k, k + block_size) / (block_size ** 2) for c in range(3)]]\n return result\n\ndef distance_to_closest(value, targets):\n return min([abs(x - value) for x in targets])\n\ndef data_from_color(average_intensity):\n \"\"\"\n Returns: value from range [0, 1] being approximate probability of bit related with given average being 1\n Takes:\n average_intensity - average intensity of color in block\n \"\"\"\n dist_to_zero = distance_to_closest(average_intensity, zeros)\n dist_to_one = distance_to_closest(average_intensity, ones)\n return dist_to_zero / (dist_to_zero + dist_to_one)\n\n\ndef uncover_data(path):\n \"\"\"\n Returns: data hidden in picture as list of 0 and 1, or [] if there is no data, or picture is corrupted to much\n \"\"\"\n picture = misc.imread(path, mode=\"RGB\")\n height = picture.shape[0]\n width = 
picture.shape[1]\n for block_size in range(min(width, height), 0, -1):\n if possible_block_size(width, height, block_size):\n block_data = parse_picture_to_averages(picture, block_size)\n minimal_length = CODE_LENGTH / 2 * (maximal_message_length(width, height, block_size + 1) + 1)\n while len(block_data) % (CODE_LENGTH / 2) != 0:\n block_data.pop()\n while len(block_data) > minimal_length:\n has_data_total_dist = sum([distance_to_closest(x[BLUE], has_data) for x in block_data[-CODE_LENGTH:]])\n has_no_data_total_dist = sum([distance_to_closest(x[BLUE], has_no_data) for x in block_data[-CODE_LENGTH:]])\n if has_data_total_dist > has_no_data_total_dist:\n for _ in range(CODE_LENGTH):\n block_data.pop()\n else:\n break\n parsed_data = []\n for bl in block_data:\n parsed_data.append(data_from_color(bl[RED]))\n parsed_data.append(data_from_color(bl[GREEN]))\n data = extract(parsed_data)\n if data:\n return data\n return []\n\n", "sub_path": "src/algorithms/reading.py", "file_name": "reading.py", "file_ext": "py", "file_size_in_byte": 3426, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "scipy.misc.imread", "line_number": 62, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "78454670", "text": "\"\"\"HADDOCK3 gdock integration module\"\"\"\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom pathlib import Path\n\nfrom haddock import log\nfrom haddock.libs import libpdb\nfrom haddock.libs.libontology import Format, ModuleIO, PDBFile\nfrom haddock.libs.libutil import check_subprocess\nfrom haddock.modules import BaseHaddockModule, working_directory\n\n\nRECIPE_PATH = Path(__file__).resolve().parent\nDEFAULT_CONFIG = Path(RECIPE_PATH, \"defaults.cfg\")\n\n\ndef ambig2dic(ambig_f):\n \"\"\"Read an ambig.tbl file and convert it to a dictionary\"\"\"\n ambig_regex = r\"resid\\s*(\\d*)\\s*and\\s*segid\\s*(\\w)\"\n ambig_dic = {}\n with open(ambig_f) as fh:\n for line in fh.readlines():\n matches = re.finditer(ambig_regex, line)\n for m in matches:\n resid = int(m.group(1))\n chain = m.group(2)\n if chain not in ambig_dic:\n ambig_dic[chain] = []\n\n ambig_dic[chain].append(resid)\n return ambig_dic\n\n\nclass HaddockModule(BaseHaddockModule):\n\n def __init__(self, order, path, initial_params=DEFAULT_CONFIG):\n super().__init__(order, path, initial_params)\n\n @classmethod\n def confirm_installation(cls):\n \"\"\"Confirm this module is installed.\"\"\"\n gdock_path = os.environ['GDOCK_PATH']\n gdock_exec = Path(gdock_path, 'gdock.py')\n check_subprocess(f'{sys.executable} {gdock_exec}')\n\n def run(self, **params):\n log.info(\"Running [gdock] module\")\n\n super().run(params)\n\n try:\n gdock_path = os.environ['GDOCK_PATH']\n except KeyError:\n self.finish_with_error('GDOCK_PATH not defined')\n\n gdock_exec = Path(gdock_path, 'gdock.py')\n if not gdock_exec.exists():\n self.finish_with_error(f'{gdock_exec} not found')\n\n # Get the models generated in previous step\n models_to_dock = [p for p in self.previous_io.output if p.file_type == Format.PDB]\n\n if '00_topoaa' not in Path(models_to_dock[0].path).stem:\n _msg = 'This module must come after Topology generation'\n self.finish_with_error(_msg)\n\n topologies = [p for p in self.previous_io.output if p.file_type == Format.TOPOLOGY]\n\n input_a = Path(models_to_dock[0].path, models_to_dock[0].file_name)\n input_b = Path(models_to_dock[1].path, models_to_dock[1].file_name)\n\n input = {'A': input_a, 'B': 
input_b}\n # Check if chain IDs are present\n for chain in input:\n pdb = input[chain]\n chain_pdb = Path(self.path, pdb.name)\n segids, chains = libpdb.identify_chainseg(pdb)\n if set(segids) != set(chains):\n log.info(\"No chain IDs found, using segid information\")\n libpdb.swap_segid_chain(pdb, chain_pdb)\n input[chain] = chain_pdb\n\n # convert ambig to list\n ambig_dic = ambig2dic(self.params.get('ambig', None))\n\n input_toml = '' + os.linesep\n input_toml += '[main]' + os.linesep\n input_toml += 'identifier = \"gdock-integration\"' + os.linesep\n\n # this is needed because 'ncores' is defined in BaseHaddockModule\n # by default as None\n ncores = self.params['ncores'] or 1\n input_toml += f'number_of_processors = {ncores}' + os.linesep\n\n input_toml += '[restraints]' + os.linesep\n\n for chain in ambig_dic:\n reslist = list(set(ambig_dic[chain]))\n input_toml += f'{chain} = {reslist}' + os.linesep\n\n input_toml += '[molecules]' + os.linesep\n input_toml += f'A = \\\"{input[\"A\"]}\\\"' + os.linesep\n input_toml += f'B = \\\"{input[\"B\"]}\\\"' + os.linesep\n input_toml += os.linesep\n\n with working_directory(self.path):\n with open('input.toml', 'w') as inp_fh:\n inp_fh.write(input_toml)\n\n cmd = f'{sys.executable} {gdock_exec} --dry input.toml'\n\n subprocess.call(cmd, shell=True)\n\n # retrieve the structures\n output_structures = []\n structure_folder = Path(self.path, 'gdock-integration/structures')\n for model in structure_folder.glob('*pdb'):\n pdb = PDBFile(model, path=model.parent)\n pdb.score = .0\n pdb.topology = topologies\n output_structures.append(pdb)\n\n io = ModuleIO()\n io.add(output_structures, \"o\")\n io.save(self.path)\n", "sub_path": "src/haddock/modules/sampling/gdock/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 4400, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pathlib.Path", "line_number": 16, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "call"}, {"api_name": "re.finditer", "line_number": 26, "usage_type": "call"}, {"api_name": "haddock.modules.BaseHaddockModule", "line_number": 37, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 46, "usage_type": "call"}, {"api_name": "haddock.libs.libutil.check_subprocess", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 47, "usage_type": "attribute"}, {"api_name": "haddock.log.info", "line_number": 50, "usage_type": "call"}, {"api_name": "haddock.log", "line_number": 50, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 59, "usage_type": "call"}, {"api_name": "haddock.libs.libontology.Format.PDB", "line_number": 64, "usage_type": "attribute"}, {"api_name": "haddock.libs.libontology.Format", "line_number": 64, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 66, "usage_type": "call"}, {"api_name": "haddock.libs.libontology.Format.TOPOLOGY", "line_number": 70, "usage_type": "attribute"}, {"api_name": "haddock.libs.libontology.Format", "line_number": 70, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 72, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 73, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 79, "usage_type": "call"}, {"api_name": 
"haddock.libs.libpdb.identify_chainseg", "line_number": 80, "usage_type": "call"}, {"api_name": "haddock.libs.libpdb", "line_number": 80, "usage_type": "name"}, {"api_name": "haddock.log.info", "line_number": 82, "usage_type": "call"}, {"api_name": "haddock.log", "line_number": 82, "usage_type": "name"}, {"api_name": "haddock.libs.libpdb.swap_segid_chain", "line_number": 83, "usage_type": "call"}, {"api_name": "haddock.libs.libpdb", "line_number": 83, "usage_type": "name"}, {"api_name": "os.linesep", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 107, "usage_type": "attribute"}, {"api_name": "haddock.modules.working_directory", "line_number": 109, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 113, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 115, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 119, "usage_type": "call"}, {"api_name": "haddock.libs.libontology.PDBFile", "line_number": 121, "usage_type": "call"}, {"api_name": "haddock.libs.libontology.ModuleIO", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "58199368", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'20160729_week1_homework'\n\n_author_='wangjianfeng'\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\n\ndef get_info(urls,data=None):\n for url in urls:\n time.sleep(2)\n wb_data=requests.get(url).text\n soup=BeautifulSoup(wb_data,'lxml')\n category=soup.select('span:nth-of-type(4)')\n title=soup.select('h1.info_titile')\n price=soup.select('div.price_li > span > i')\n address=soup.select('div.palce_li > span > i')\n tags=soup.select('div.biaoqian_li')\n scan=soup.select('span.look_time')\n\n for category,title,price,address,tags,scan in zip(category,title,price,address,tags,scan):\n data={\n 'category':category.get_text().strip(),\n 'title':title.get_text(),\n 'price':price.get_text(),\n 'address':address.get_text(),\n 'tages':list(tags.stripped_strings),\n 'scan':scan.get_text(),\n }\n print(data)\n\nif __name__=='__main__':\n page_num=2\n url=[]\n page_url=['http://bj.58.com/pbdn/0/pn{}/'.format(str(1)) for i in range(1,page_num)][0]\n page_data=requests.get(page_url).text\n soup=BeautifulSoup(page_data,'lxml')\n urls=soup.select('tr.zzinfo > td.img > a')\n for i in urls:\n url.append(i.get('href'))\n # print(i.get('href'))\nget_info(url)", "sub_path": "Week_1/week1_homework/answer_of_homework/homework_mine.py", "file_name": "homework_mine.py", "file_ext": "py", "file_size_in_byte": 1408, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "time.sleep", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": 
"bs4.BeautifulSoup", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "69358534", "text": "from json_database.utils import fuzzy_match, match_one\nfrom json_database import JsonDatabase, JsonStorageXDG\n\n\nclass Query:\n def __init__(self, db):\n if isinstance(db, JsonDatabase):\n self.result = [a for _, a in db.db.items()]\n else:\n self.result = [a for _, a in db.items()]\n\n def contains_key(self, key, fuzzy=False, thresh=0.7):\n if fuzzy:\n after = []\n for e in self.result:\n filter = True\n for k in e:\n score = fuzzy_match(k, key)\n if score < thresh:\n continue\n filter = False\n if not filter:\n after.append(e)\n self.result = after\n else:\n self.result = [a for a in self.result if a.get(key)]\n return self\n\n def contains_value(self, key, value, fuzzy=False, thresh=0.75):\n self.contains_key(key)\n if fuzzy:\n after = []\n for e in self.result:\n if isinstance(e[key], str):\n score = fuzzy_match(value, e[key])\n if score > thresh:\n after.append(e)\n elif isinstance(e[key], list) or isinstance(e[key], dict):\n v, score = match_one(value, e[key])\n if score < thresh:\n continue\n after.append(e)\n self.result = after\n else:\n self.result = [a for a in self.result if value in a[key]]\n return self\n\n def value_contains(self, key, value, ignore_case=False):\n self.contains_key(key)\n if ignore_case:\n after = []\n value = str(value).lower()\n for e in self.result:\n if isinstance(e[key], str):\n if value in e[key].lower():\n after.append(e)\n elif isinstance(e[key], list):\n if value in [str(_).lower() for _ in e[key]]:\n after.append(e)\n elif isinstance(e[key], dict):\n if value in [str(_).lower() for _ in e[key].keys()]:\n after.append(e)\n self.result = after\n else:\n self.result = [e for e in self.result if value in e[key]]\n return self\n\n def value_contains_token(self, key, value, ignore_case=False):\n self.contains_key(key)\n after = []\n value = str(value)\n for e in self.result:\n if isinstance(e[key], str):\n if ignore_case:\n if value.lower() in e[key].lower().split(\" \"):\n after.append(e)\n else:\n if value in e[key].split(\" \"):\n after.append(e)\n elif value in e[key]:\n after.append(e)\n self.result = after\n return self\n\n def equal(self, key, value):\n self.contains_key(key)\n self.result = [a for a in self.result if a[key] == value]\n return self\n\n def bellow(self, key, value):\n self.contains_key(key)\n self.result = [a for a in self.result if a[key] < value]\n return self\n\n def above(self, key, value):\n self.contains_key(key)\n self.result = [a for a in self.result if a[key] > value]\n return self\n\n def bellow_or_equal(self, key, value):\n self.contains_key(key)\n self.result = [a for a in self.result if a[key] <= value]\n return self\n\n def above_or_equal(self, key, value):\n self.contains_key(key)\n self.result = [a for a in self.result if a[key] >= value]\n return self\n\n def in_range(self, key, min_value, max_value):\n self.contains_key(key)\n self.result = [a for a in self.result if min_value < a[key] < max_value]\n return self\n\n def all(self):\n return self\n\n def build(self):\n return self.result\n\n\n", "sub_path": "json_database/search.py", "file_name": "search.py", "file_ext": "py", "file_size_in_byte": 4013, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "json_database.JsonDatabase", "line_number": 7, "usage_type": "argument"}, {"api_name": "json_database.utils.fuzzy_match", "line_number": 18, "usage_type": "call"}, {"api_name": 
"json_database.utils.fuzzy_match", "line_number": 35, "usage_type": "call"}, {"api_name": "json_database.utils.match_one", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "352266094", "text": "import datetime\nimport subprocess\nimport time\n\nimport common\nimport tptp\n\nargs = common.args_problems()\ncodes = common.get_error_codes()\nproblems = tptp.get_problems(args)\n\nstart = time.time()\ntried = 0\nsolved = 0\nhardest = {}\n\ntry:\n for file in problems:\n print(file)\n tptp.print_header(file)\n expected = tptp.get_expected(file)\n\n cmd = \"./ayane\", \"-t\", str(args.time), file\n t = time.time()\n p = subprocess.run(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding=\"utf-8\"\n )\n t = time.time() - t\n print(\"%.3f seconds\" % t)\n s = p.stdout\n print(s)\n\n code = codes.get(p.returncode, p.returncode)\n if code == \"inappropriateError\":\n continue\n tried += 1\n if code in (-14, 4294967282):\n continue\n if code:\n raise Exception(code)\n\n r = None\n if s.startswith(\"sat\"):\n r = \"sat\"\n elif s.startswith(\"unsat\"):\n r = \"unsat\"\n tptp.check(r, expected)\n\n if r:\n solved += 1\n if t > hardest.get(r, (0, 0))[1]:\n hardest[r] = file, t\nexcept KeyboardInterrupt:\n print()\n\nprint(\"Total time\")\nt = time.time() - start\nprint(datetime.timedelta(seconds=t))\nprint()\n\nif hardest:\n print(\"Hardest solved\")\n if \"sat\" in hardest:\n print(\"sat\\t%s\\t%.3f\" % hardest[\"sat\"])\n if \"unsat\" in hardest:\n print(\"unsat\\t%s\\t%.3f\" % hardest[\"unsat\"])\n print()\n\nif tried:\n print(\"Success rate\")\n print(f\"{solved}/{tried}\")\n print(\"%f%%\" % (float(solved) / tried * 100))\n", "sub_path": "script/batch_tptp.py", "file_name": "batch_tptp.py", "file_ext": "py", "file_size_in_byte": 1635, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "common.args_problems", "line_number": 8, "usage_type": "call"}, {"api_name": "common.get_error_codes", "line_number": 9, "usage_type": "call"}, {"api_name": "tptp.get_problems", "line_number": 10, "usage_type": "call"}, {"api_name": "time.time", "line_number": 12, "usage_type": "call"}, {"api_name": "tptp.print_header", "line_number": 20, "usage_type": "call"}, {"api_name": "tptp.get_expected", "line_number": 21, "usage_type": "call"}, {"api_name": "time.time", "line_number": 24, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 25, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}, {"api_name": "tptp.check", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "229894736", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nfrom concurrent import futures\nimport msgpack\nimport msgpack_numpy\nimport numpy as np\nimport os\nimport tempfile\nimport tqdm\nimport threading\nimport subprocess\nimport sys\n\nimport graph_map\nimport problem\n\n_NUM_AGENTS = 25\n_NUM_HUBS = 10\n_NUM_TASKS = 5\n_NUM_NODES = 200\n_NUM_GRAPHS = 50\n_NUM_SAMPLES = 200\n_NUM_SAMPLES_GT = 10\n_NUM_THREADS = 32\n\n# Variable.\n_CORRELATION_STRENGTH = np.linspace(.1, .9, 
9).tolist()\n_DEPLOYMENT_SIZE = range(_NUM_TASKS, _NUM_AGENTS + 1, 2)\n_TOP_K = [1, 2, 4, 8, 16]\n\n# Fixed.\n_BASE_CORRELATION_STRENGTH = .9\n_BASE_DEPLOYMENT_SIZE = 20\n_BASE_TOP_K = 4\n\n\nArguments = collections.namedtuple('Arguments', [\n 'deployment_size', 'top_k', 'correlation_strength'])\nArguments.__new__.__defaults__ = (_BASE_DEPLOYMENT_SIZE, _BASE_TOP_K, _BASE_CORRELATION_STRENGTH)\n\nOldArguments = collections.namedtuple('OldArguments', [\n 'deployment_size', 'top_k', 'correlation_sparsity', 'correlation_strength'])\nOldArguments.__new__.__defaults__ = (_BASE_DEPLOYMENT_SIZE, _BASE_TOP_K, .3, _BASE_CORRELATION_STRENGTH)\n\n\ndef store_results(results, filename):\n with open(filename, 'wb') as fp:\n buf = msgpack.packb(results, use_bin_type=True)\n fp.write(buf)\n\n\ndef read_results(filename):\n with open(filename, 'rb') as fp:\n r = msgpack.unpackb(fp.read(), raw=False)\n os.remove(filename)\n return r\n\n\ndef run_problem(filename, arguments):\n graph = graph_map.GraphMap(_NUM_NODES, arguments.top_k,\n largest_correlation=arguments.correlation_strength)\n agents = np.random.randint(_NUM_HUBS, size=_NUM_AGENTS)\n tasks = np.random.randint(_NUM_HUBS, graph.num_nodes, size=_NUM_TASKS)\n p = problem.Problem(graph, agents, tasks, num_samples=_NUM_SAMPLES,\n num_groundtruth_samples=_NUM_SAMPLES_GT,\n aggregation=problem.MinimumAggregation())\n\n results = {\n 'lower_bound': ([], []),\n 'hungarian': ([], []),\n # 'repeated_hungarian': ([], []),\n # 'greedy': ([], []),\n 'random': ([], []),\n # 'no_correlation_greedy': ([], []),\n 'closest': ([], []),\n }\n p.reset()\n for algorithm, (costs, correlations) in results.items():\n cost = getattr(p, algorithm)(arguments.deployment_size)\n correlation = p.get_correlation()\n costs.extend(cost)\n correlations.append(correlation)\n\n store_results(results, filename)\n\n\ndef run_task(filename, arguments):\n args = [sys.executable, __file__, '--output', filename]\n for field in Arguments._fields:\n args.append('--{}'.format(field))\n args.append(str(getattr(arguments, field)))\n return subprocess.call(args)\n\n\ndef done(fn, counter):\n counter.inc()\n\n\nclass AtomicProgressBar(object):\n def __init__(self, total):\n self._value = 0\n self._lock = threading.Lock()\n self._tqdm = tqdm.tqdm(total=total)\n\n def inc(self):\n with self._lock:\n self._value += 1\n self._tqdm.update(1)\n\n def close(self):\n self._tqdm.close()\n\n\ndef run(final_filename):\n directory = tempfile.mkdtemp()\n\n args = set()\n for d in _DEPLOYMENT_SIZE:\n args.add(Arguments(deployment_size=d))\n for top_k in _TOP_K:\n args.add(Arguments(top_k=top_k))\n for correlation_strength in _CORRELATION_STRENGTH:\n args.add(Arguments(correlation_strength=correlation_strength))\n all_args = []\n for arg in sorted(args):\n all_args.extend([arg] * _NUM_GRAPHS)\n\n threads = []\n executor = futures.ProcessPoolExecutor(max_workers=_NUM_THREADS)\n counter = AtomicProgressBar(len(all_args))\n for i, a in enumerate(all_args):\n filename = os.path.join(directory, 'results_{}.bin'.format(i))\n threads.append((executor.submit(run_task, filename, a), filename, i))\n threads[-1][0].add_done_callback(lambda fn: done(fn, counter))\n\n all_results = collections.defaultdict(dict)\n for thread, filename, idx in threads:\n if thread.result() != 0:\n raise ValueError('Error while running a process.')\n thread_results = read_results(filename)\n results = all_results[all_args[idx]]\n if not results:\n results.update(thread_results)\n continue\n for algorithm, (costs, correlations) in 
thread_results.items():\n results[algorithm][0].extend(costs)\n results[algorithm][1].extend(correlations)\n\n all_results = dict(all_results) # Remove defaultdict.\n store_results(all_results, final_filename)\n\n for args, results in all_results.items():\n print('Results for', args)\n baseline_costs = np.array(results['hungarian'][0], np.float32)\n for algorithm, (costs, correlations) in results.items():\n c = np.array(costs, np.float32)\n print(' Cost (%s): %g - correlation: %g' % (algorithm, np.mean(c / baseline_costs), np.mean(correlations)))\n\n\nif __name__ == '__main__':\n msgpack_numpy.patch() # Magic.\n\n parser = argparse.ArgumentParser(description='Launches a battery of experiments in parallel')\n parser.add_argument('--output_results', action='store', default=None, help='Where to store results.')\n\n # Internal arguments.\n parser.add_argument('--output', action='store', default=None)\n defaults = Arguments()\n for field in Arguments._fields:\n v = getattr(defaults, field)\n parser.add_argument('--{}'.format(field), type=type(v), action='store', default=v)\n args = parser.parse_args()\n\n if args.output:\n run_problem(args.output,\n Arguments(args.deployment_size,\n args.top_k,\n args.correlation_strength))\n else:\n assert args.output_results, 'Must specify --output_results'\n run(args.output_results)\n", "sub_path": "launch_experiments.py", "file_name": "launch_experiments.py", "file_ext": "py", "file_size_in_byte": 5638, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.linspace", "line_number": 31, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 41, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 45, "usage_type": "call"}, {"api_name": "msgpack.packb", "line_number": 52, "usage_type": "call"}, {"api_name": "msgpack.unpackb", "line_number": 58, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 59, "usage_type": "call"}, {"api_name": "graph_map.GraphMap", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "problem.Problem", "line_number": 68, "usage_type": "call"}, {"api_name": "problem.MinimumAggregation", "line_number": 70, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 92, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 96, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 106, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 107, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 119, "usage_type": "call"}, {"api_name": "concurrent.futures.ProcessPoolExecutor", "line_number": 133, "usage_type": "call"}, {"api_name": "concurrent.futures", "line_number": 133, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 160, "usage_type": 
"call"}, {"api_name": "numpy.float32", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 161, "usage_type": "call"}, {"api_name": "msgpack_numpy.patch", "line_number": 165, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "371573222", "text": "# Copyright (c) 2008-2016 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"\n===================\nIsentropic Analysis\n===================\n\nThe MetPy function `mcalc.isentropic_interpolation` allows for isentropic analysis from model\nanalysis data in isobaric coordinates.\n\"\"\"\n\n########################################\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport matplotlib.pyplot as plt\nfrom netCDF4 import Dataset, num2date\nimport numpy as np\n\nimport metpy.calc as mcalc\nfrom metpy.cbook import get_test_data\nfrom metpy.units import units\n\n#######################################\n# **Getting the data**\n#\n# In this example, NARR reanalysis data for 18 UTC 04 April 1987 from the National Centers\n# for Environmental Information (https://nomads.ncdc.noaa.gov) will be used.\n\ndata = Dataset(get_test_data('narr_example.nc', False))\n\n##########################\nprint(list(data.variables))\n\n#############################\n# We will reduce the dimensionality of the data as it is pulled in to remove an empty time\n# dimension. Additionally, units are required for input data, so the proper units will also\n# be attached.\n\n\n# Assign data to variable names\ndtime = data.variables['Geopotential_height'].dimensions[0]\ndlev = data.variables['Geopotential_height'].dimensions[1]\nlat = data.variables['lat'][:]\nlon = data.variables['lon'][:]\nlev = data.variables[dlev][:] * units(data.variables[dlev].units)\ntimes = data.variables[dtime]\nvtimes = num2date(times[:], times.units)\n\ntemps = data.variables['Temperature']\ntmp = temps[0, :] * units.kelvin\nuwnd = data.variables['u_wind'][0, :] * units(data.variables['u_wind'].units)\nvwnd = data.variables['v_wind'][0, :] * units(data.variables['v_wind'].units)\nhgt = data.variables['Geopotential_height'][0, :] * units.meter\nspech = (data.variables['Specific_humidity'][0, :] *\n units(data.variables['Specific_humidity'].units))\n\n#############################\n# To properly interpolate to isentropic coordinates, the function must know the desired output\n# isentropic levels. An array with these levels will be created below.\n\nisentlevs = [296.] * units.kelvin\n\n####################################\n# **Conversion to Isentropic Coordinates**\n#\n# Once three dimensional data in isobaric coordinates has been pulled and the desired\n# isentropic levels created, the conversion to isentropic coordinates can begin. Data will be\n# passed to the function as below. The function requires that isentropic levels, isobaric\n# levels, and temperature be input. 
Any additional inputs (in this case relative humidity, u,\n# and v wind components) will be linearly interpolated to isentropic space.\n\nisent_anal = mcalc.isentropic_interpolation(isentlevs,\n                                            lev,\n                                            tmp,\n                                            spech,\n                                            uwnd,\n                                            vwnd,\n                                            hgt,\n                                            tmpk_out=True)\n\n#####################################\n# The output is a list, so now we will separate the variables to different names before\n# plotting.\n\n\nisentprs = isent_anal[0]\nisenttmp = isent_anal[1]\nisentspech = isent_anal[2]\nisentu = isent_anal[3].to('kt')\nisentv = isent_anal[4].to('kt')\nisenthgt = isent_anal[5]\n\n########################################\n# A quick look at the shape of these variables will show that the data is now in isentropic\n# coordinates, with the number of vertical levels as specified above.\n\nprint(isentprs.shape)\nprint(isentspech.shape)\nprint(isentu.shape)\nprint(isentv.shape)\nprint(isenttmp.shape)\nprint(isenthgt.shape)\n\n#################################\n# **Converting to Relative Humidity**\n#\n# The NARR only gives specific humidity on isobaric vertical levels, so relative humidity will\n# have to be calculated after the interpolation to isentropic space.\n\nisentrh = mcalc.relative_humidity_from_specific_humidity(isentspech, isenttmp, isentprs)\n\n#######################################\n# **Plotting the Isentropic Analysis**\n\n\n# Set up our projection\ncrs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)\n\n\n# Set up our array of latitude and longitude values and transform to\n# the desired projection.\ntlatlons = crs.transform_points(ccrs.PlateCarree(), lon, lat)\ntlons = tlatlons[:, :, 0]\ntlats = tlatlons[:, :, 1]\n\n# Coordinates to limit map area\nbounds = [(-122., -75., 25., 50.)]\n# Choose a level to plot, in this case 296 K\nlevel = 0\n\n# Get data to plot state and province boundaries\nstates_provinces = cfeature.NaturalEarthFeature(category='cultural',\n                                                name='admin_1_states_provinces_lakes',\n                                                scale='50m',\n                                                facecolor='none')\n\nfig = plt.figure(1, figsize=(17., 12.))\nax = plt.subplot(111, projection=crs)\nax.set_extent(*bounds, crs=ccrs.PlateCarree())\nax.coastlines('50m', edgecolor='black', linewidth=0.75)\nax.add_feature(states_provinces, edgecolor='black', linewidth=0.5)\n\n# Plot the surface\nclevisent = np.arange(0, 1000, 25)\ncs = ax.contour(tlons, tlats, isentprs[level, :, :], clevisent,\n                colors='k', linewidths=1.0, linestyles='solid')\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=7,\n           fmt='%i', rightside_up=True, use_clabeltext=True)\n\n# Plot RH\ncf = ax.contourf(tlons, tlats, isentrh[level, :, :], range(10, 106, 5),\n                 cmap=plt.cm.gist_earth_r)\ncb = plt.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0,\n                  extendrect=True)\ncb.set_label('Relative Humidity', size='x-large')\n\n# Transform Vectors before plotting, then plot wind barbs.\nut, vt = crs.transform_vectors(ccrs.PlateCarree(), lon, lat, isentu[level, :, :].m,\n                               isentv[level, :, :].m)\nax.barbs(tlons, tlats, ut, vt, length=6, regrid_shape=20)\n\n# Make some titles\nplt.title('{:.0f} K Isentropic Pressure (hPa), Wind (kt), Relative Humidity (percent)'\n          .format(isentlevs[level].m),\n          loc='left')\nplt.title('VALID: {:s}'.format(str(vtimes[0])), loc='right')\nplt.tight_layout()\n\n######################################\n# **Montgomery Streamfunction**\n#\n# The Montgomery Streamfunction, :math:`{\\psi} = gz + CpT`, is often desired because its\n# gradient is proportional to the geostrophic wind in isentropic space. 
This can be easily\n# calculated with `mcalc.montgomery_streamfunction`.\n\n\n# Calculate Montgomery Streamfunction and scale by 10^-2 for plotting\nmsf = mcalc.montgomery_streamfunction(isenthgt, isenttmp) / 100.\n\n# Choose a level to plot, in this case 296 K\nlevel = 0\n\nfig = plt.figure(1, figsize=(17., 12.))\nax = plt.subplot(111, projection=crs)\nax.set_extent(*bounds, crs=ccrs.PlateCarree())\nax.coastlines('50m', edgecolor='black', linewidth=0.75)\nax.add_feature(states_provinces, edgecolor='black', linewidth=0.5)\n\n# Plot the surface\nclevmsf = np.arange(0, 4000, 5)\ncs = ax.contour(tlons, tlats, msf[level, :, :], clevmsf,\n                colors='k', linewidths=1.0, linestyles='solid')\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=7,\n           fmt='%i', rightside_up=True, use_clabeltext=True)\n# Plot RH\ncf = ax.contourf(tlons, tlats, isentrh[level, :, :], range(10, 106, 5),\n                 cmap=plt.cm.gist_earth_r)\ncb = plt.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0,\n                  extendrect=True)\ncb.set_label('Relative Humidity', size='x-large')\n\n# Transform Vectors before plotting, then plot wind barbs.\nut, vt = crs.transform_vectors(ccrs.PlateCarree(), lon, lat, isentu[level, :, :].m,\n                               isentv[level, :, :].m)\nax.barbs(tlons, tlats, ut, vt, length=6, regrid_shape=20)\n\n# Make some titles\nplt.title('{:.0f} K Montgomery Streamfunction '.format(isentlevs[level].m) +\n          r'($10^{-2} m^2 s^{-2}$), ' +\n          'Wind (kt), Relative Humidity (percent)', loc='left')\nplt.title('VALID: {:s}'.format(str(vtimes[0])), loc='right')\nplt.tight_layout()\n", "sub_path": "examples/isentropic_example.py", "file_name": "isentropic_example.py", "file_ext": "py", "file_size_in_byte": 8190, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "netCDF4.Dataset", "line_number": 30, "usage_type": "call"}, {"api_name": "metpy.cbook.get_test_data", "line_number": 30, "usage_type": "call"}, {"api_name": "metpy.units.units", "line_number": 46, "usage_type": "call"}, {"api_name": "netCDF4.num2date", "line_number": 48, "usage_type": "call"}, {"api_name": "metpy.units.units.kelvin", "line_number": 51, "usage_type": "attribute"}, {"api_name": "metpy.units.units", "line_number": 51, "usage_type": "name"}, {"api_name": "metpy.units.units", "line_number": 52, "usage_type": "call"}, {"api_name": "metpy.units.units", "line_number": 53, "usage_type": "call"}, {"api_name": "metpy.units.units.meter", "line_number": 54, "usage_type": "attribute"}, {"api_name": "metpy.units.units", "line_number": 54, "usage_type": "name"}, {"api_name": "metpy.units.units", "line_number": 56, "usage_type": "call"}, {"api_name": "metpy.units.units.kelvin", "line_number": 62, "usage_type": "attribute"}, {"api_name": "metpy.units.units", "line_number": 62, "usage_type": "name"}, {"api_name": "metpy.calc.isentropic_interpolation", "line_number": 73, "usage_type": "call"}, {"api_name": "metpy.calc", "line_number": 73, "usage_type": "name"}, {"api_name": "metpy.calc.relative_humidity_from_specific_humidity", "line_number": 111, "usage_type": "call"}, {"api_name": "metpy.calc", "line_number": 111, "usage_type": "name"}, {"api_name": "cartopy.crs.LambertConformal", "line_number": 118, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 118, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 123, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 123, "usage_type": "name"}, {"api_name": 
"cartopy.feature.NaturalEarthFeature", "line_number": 133, "usage_type": "call"}, {"api_name": "cartopy.feature", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 140, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 140, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clabel", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 153, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 159, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "metpy.calc.montgomery_streamfunction", "line_number": 179, "usage_type": "call"}, {"api_name": "metpy.calc", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 186, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 186, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clabel", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 198, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 204, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", 
"line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}]} +{"seq_id": "641297822", "text": "# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url\nfrom strongme.articles.views import ArticleList, ArticleDetail\n\n__author__ = 'sidchik'\n\nurlpatterns = patterns('',\n url(r'^$', ArticleList.as_view(), name='article_list'),\n # url(r'^category/(?P\\d+)/$', ShopProductList.as_view(), name='shop_category_detail'),\n url(r'^(?P\\d+)/$', ArticleDetail.as_view(), name='article_detail'),\n\n\n)", "sub_path": "strongme/articles/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 425, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "strongme.articles.views.ArticleList.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "strongme.articles.views.ArticleList", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "strongme.articles.views.ArticleDetail.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "strongme.articles.views.ArticleDetail", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "585005048", "text": "# BSD LICENSE\n#\n# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nDPDK Test suite.\n\"\"\"\nimport re\nimport utils\nfrom test_case import TestCase\nfrom etgen import IxiaPacketGenerator\n\n\nclass TestDistributor(TestCase, IxiaPacketGenerator):\n def set_up_all(self):\n \"\"\"\n Run at the start of each test suite.\n \"\"\"\n self.tester.extend_external_packet_generator(TestDistributor, self)\n\n # reduce tx queues for enable many workers\n self.dut.send_expect(\"sed -i -e 's/.*txRings = .*/\\\\tconst uint16_t rxRings = 1, txRings = 1;/' ./examples/distributor/main.c\", \"#\")\n out = self.dut.build_dpdk_apps(\"./examples/distributor\")\n self.verify(\"Error\" not in out, \"Compilation error\")\n self.verify(\"No such\" not in out, \"Compilation error\")\n\n self.dut_ports = self.dut.get_ports()\n self.app = \"./examples/distributor/build/distributor_app\"\n\n def set_up(self):\n \"\"\"\n Run before each test case.\n \"\"\"\n pass\n\n def test_distributor_unit(self):\n \"\"\"\n Run distributor unit test\n \"\"\"\n self.dut.send_expect(\"./%s/app/test -n 1 -c f\" % self.target, \"RTE>>\", 60)\n out = self.dut.send_expect(\"distributor_autotest\", \"RTE>>\", 30)\n self.dut.send_expect(\"quit\", \"# \")\n self.verify(\"Test OK\" in out, \"Test failed\")\n\n def test_distributor_unit_perf(self):\n \"\"\"\n Run distributor unit perf test\n \"\"\"\n self.dut.send_expect(\"./%s/app/test -n 1 -c f\" % self.target, \"RTE>>\", 60)\n out = self.dut.send_expect(\"distributor_perf_autotest\", \"RTE>>\", 120)\n cycles_single = self.strip_cycles(out, \"single\")\n cycles_burst = self.strip_cycles(out, \"burst\")\n self.logger.info(\"Cycles for single mode is %d burst mode is %d\"\n % (cycles_single, cycles_burst))\n self.dut.send_expect(\"quit\", \"# \")\n self.verify(\"Test OK\" in out, \"Test failed\")\n self.verify(cycles_single > cycles_burst * 2,\n \"Burst performance should be much better\")\n\n def test_perf_distributor(self):\n \"\"\"\n Run distributor perf test, recorded statistic of Rx/Enqueue/Sent/Dequeue/Tx\n \"\"\"\n self.verify(len(self.dut_ports) >= 1, \"Not enough ports\")\n workers = [1, 2, 3, 4, 8, 16, 32]\n table_header = [\"Number of workers\",\n \"Throughput Rate Rx received\",\n \"Throughput Rate Rx core enqueued\",\n \"Throughput Rate Distributor Sent\",\n \"Throughput Rate Tx core dequeued\",\n \"Throughput Rate Tx transmitted\",\n \"Throughput Rate Pkts out\",\n \"Throughput Rate Pkts out line rate\"]\n\n # output port is calculated from overall ports number\n cmd_fmt = \"%s -c %s -n %d -w %s -- -p 0x1\"\n socket = self.dut.get_numa_id(self.dut_ports[0])\n\n self.tester.scapy_append('wrpcap(\"distributor.pcap\", [Ether()/IP()/(\"X\"*26)])')\n self.tester.scapy_execute()\n tgen_input = []\n rx_port = self.tester.get_local_port(self.dut_ports[0])\n tx_port = self.tester.get_local_port(self.dut_ports[0])\n tgen_input.append((tx_port, rx_port, \"distributor.pcap\"))\n\n self.result_table_create(table_header)\n for worker_num in workers:\n # Rx core/distributor core/Tx core/stats core\n cores = 
self.dut.get_core_list(\"1S/%dC/1T\" % (worker_num + 4), socket)\n # If can't get enough core from one socket, just use all lcores\n if len(cores) < (worker_num + 4):\n cores = self._get_thread_lcore(worker_num + 4)\n\n cmd = cmd_fmt % (self.app, utils.create_mask(cores),\n self.dut.get_memory_channels(),\n self.dut.get_port_pci(self.dut_ports[0]))\n\n self.dut.send_expect(cmd, \"doing packet RX\", timeout=30)\n\n self.tester.ixia_packet_gen.hook_transmission_func = self.hook_transmission_func\n _, pps = self.tester.traffic_generator_throughput(tgen_input, delay=2)\n\n self.dut.send_expect(\"^C\", \"#\")\n\n pps /= 1000000.0\n rx, enq, sent, deq, trans = self.strip_performance_data(self.app_output)\n rate = pps * 100 / float(self.wirespeed(self.nic, 64, 1))\n self.result_table_add([worker_num, rx, enq, sent, deq, trans, pps, float('%.3f' % rate)])\n\n self.result_table_print()\n\n def test_maximum_workers(self):\n \"\"\"\n Check distributor app work fine with maximum workers\n \"\"\"\n self.verify(len(self.dut_ports) >= 1, \"Not enough ports\")\n cmd_fmt = \"%s -c %s -n %d -w %s -- -p 0x1\"\n\n out = self.dut.send_expect(\"sed -n '/#define RTE_DISTRIB_MAX_WORKERS/p' lib/librte_distributor/rte_distributor_private.h\", \"# \")\n reg_match = r\"#define RTE_DISTRIB_MAX_WORKERS (.*)\"\n m = re.match(reg_match, out)\n self.verify(m, \"Can't find maximum worker number\")\n\n max_workers = int(m.group(1))\n cores = self._get_thread_lcore(max_workers - 1 + 4)\n\n cmd = cmd_fmt % (self.app, utils.create_mask(cores),\n self.dut.get_memory_channels(),\n self.dut.get_port_pci(self.dut_ports[0]))\n\n self.dut.send_expect(cmd, \"doing packet RX\", timeout=30)\n\n tx_port = self.tester.get_local_port(self.dut_ports[0])\n tgen_input = [(tx_port, tx_port)]\n self.tester.check_random_pkts(tgen_input, pktnum=256, seq_check=True)\n\n self.dut.send_expect(\"^C\", \"#\")\n\n def test_multiple_ports(self):\n \"\"\"\n Check distributor app work fine with multiple ports\n \"\"\"\n self.verify(len(self.dut_ports) >= 2, \"Not enough ports\")\n cmd_fmt = \"%s -c %s -n %d -w %s -w %s -- -p 0x3\"\n socket = self.dut.get_numa_id(self.dut_ports[0])\n cores = self.dut.get_core_list(\"1S/%dC/1T\" % (2 + 4), socket)\n\n cmd = cmd_fmt % (self.app, utils.create_mask(cores),\n self.dut.get_memory_channels(),\n self.dut.get_port_pci(self.dut_ports[0]),\n self.dut.get_port_pci(self.dut_ports[1]))\n\n self.dut.send_expect(cmd, \"doing packet RX\", timeout=30)\n\n tx_port = self.tester.get_local_port(self.dut_ports[0])\n rx_port = self.tester.get_local_port(self.dut_ports[1])\n tgen_input = [(tx_port, rx_port)]\n self.tester.check_random_pkts(tgen_input, pktnum=256, seq_check=True)\n\n tgen_input = [(rx_port, tx_port)]\n self.tester.check_random_pkts(tgen_input, pktnum=256, seq_check=True)\n\n self.dut.send_expect(\"^C\", \"#\")\n\n def _get_thread_lcore(self, core_num):\n def strip_core(x):\n return(int(x['thread']))\n cores = map(strip_core, self.dut.cores[0:core_num])\n return cores\n\n def hook_transmission_func(self):\n self.app_output = self.dut.session.get_session_before(timeout=2)\n\n def strip_performance_data(self, output=\"\"):\n \"\"\"\n Strip throughput of each stage in threads\n RX Thread:\n Port 0 Pktsin :\n - Received:\n - Returned:\n - Enqueued:\n - Dropped:\n Distributor thread:\n - In:\n - Returned:\n - Sent:\n - Dropped:\n TX thread:\n - Dequeued:\n Port 0 Pktsout:\n - Transmitted:\n - Dropped:\n \"\"\"\n # skip the last one, we use the next one\n output = output[:output.rfind(\"RX Thread\")]\n output = 
output[output.rfind(\"RX Thread\"):]\n rec_rate = 0.0\n enq_rate = 0.0\n sent_rate = 0.0\n deq_rate = 0.0\n trans_rate = 0.0\n for line in output.splitlines():\n if \"Received\" in line:\n rec_rate = float(line.split()[2])\n elif \"Enqueued\" in line:\n enq_rate = float(line.split()[2])\n elif \"Sent\" in line:\n sent_rate = float(line.split()[2])\n elif \"Dequeued\" in line:\n deq_rate = float(line.split()[2])\n elif \"Transmitted\" in line:\n trans_rate = float(line.split()[2])\n\n return (rec_rate, enq_rate, sent_rate, deq_rate, trans_rate)\n\n def strip_cycles(self, out=\"\", mode=\"single\"):\n \"\"\"\n Strip per packet cycles from output like:\n Time per burst: 12542\n Time per packet: 195\n \"\"\"\n out = out[out.index(\"%s mode\" % mode):]\n lines = out.splitlines()\n cycles = lines[2].split()[3]\n return int(cycles)\n\n def ip(self, port, frag, src, proto, tos, dst, chksum, len, options, version, flags, ihl, ttl, id):\n self.add_tcl_cmd(\"protocol config -name ip\")\n self.add_tcl_cmd('ip config -sourceIpAddr \"%s\"' % src)\n self.add_tcl_cmd(\"ip config -sourceIpAddrMode ipIdle\")\n self.add_tcl_cmd('ip config -destIpAddr \"%s\"' % dst)\n self.add_tcl_cmd(\"ip config -destIpAddrMode ipRandom\")\n self.add_tcl_cmd(\"ip config -ttl %d\" % ttl)\n self.add_tcl_cmd(\"ip config -totalLength %d\" % len)\n self.add_tcl_cmd(\"ip config -fragment %d\" % frag)\n self.add_tcl_cmd(\"ip config -ipProtocol ipV4ProtocolReserved255\")\n self.add_tcl_cmd(\"ip config -identifier %d\" % id)\n self.add_tcl_cmd(\"stream config -framesize %d\" % (len + 18))\n self.add_tcl_cmd(\"ip set %d %d %d\" % (self.chasId, port['card'], port['port']))\n\n def tear_down(self):\n \"\"\"\n Run after each test case.\n \"\"\"\n pass\n\n def tear_down_all(self):\n \"\"\"\n Run after each test suite.\n \"\"\"\n self.dut.send_expect(\"sed -i -e 's/.*txRings = .*/\\\\tconst uint16_t rxRings = 1, txRings = rte_lcore_count() - 1;/' ./examples/distributor/main.c\", \"#\")\n pass\n", "sub_path": "tests/TestSuite_distributor.py", "file_name": "TestSuite_distributor.py", "file_ext": "py", "file_size_in_byte": 11466, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "test_case.TestCase", "line_number": 41, "usage_type": "name"}, {"api_name": "etgen.IxiaPacketGenerator", "line_number": 41, "usage_type": "name"}, {"api_name": "utils.create_mask", "line_number": 121, "usage_type": "call"}, {"api_name": "re.match", "line_number": 148, "usage_type": "call"}, {"api_name": "utils.create_mask", "line_number": 154, "usage_type": "call"}, {"api_name": "utils.create_mask", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "450228228", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nTest case executor (a.k.a. 
robot)\n\"\"\"\n\nimport sys, os, time, logging, json, codecs\n\nfrom abc import ABCMeta, abstractmethod\nfrom dom_analyzer import DomAnalyzer\nfrom configuration import Browser\nfrom bs4 import BeautifulSoup\nfrom PIL import Image  # used by CBTExecutor.get_element_screenshot() below\n\n#==============================================================================================================================\n# Selenium Web Driver\n#==============================================================================================================================\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\n#==============================================================================================================================\n\nclass Executor():\n    __metaclass__ = ABCMeta\n\n    @abstractmethod\n    def fire_event(self, clickable):\n        pass\n\n    @abstractmethod\n    def fill_form(self, clickable):\n        pass\n\n    @abstractmethod\n    def empty_form(self, clickable):\n        pass\n\n    @abstractmethod\n    def get_source(self):\n        pass\n\n    @abstractmethod\n    def get_screenshot(self):\n        pass\n\n    @abstractmethod\n    def restart_app(self):\n        pass\n\n#==============================================================================================================================\n#==============================================================================================================================\n# Selenium Web Driver\n#==============================================================================================================================\nclass SeleniumExecutor():\n    def __init__(self, browserID, url):\n        #choose the type of browser\n        self.browserID = browserID\n        self.driver = None\n        #link to the url\n        self.startUrl = url\n        self.main_window = None\n\n    #==========================================================================================================================\n    # START / END / RESTART\n    #==========================================================================================================================\n    def start(self):\n        #declare the driver here\n        try:\n            if self.browserID == 1:\n                self.driver = webdriver.Firefox()\n            elif self.browserID == 2:\n                self.driver = webdriver.Chrome()\n            elif self.browserID == 3:\n                self.driver = webdriver.Ie()\n            else: #default in firefox\n                print(\"unknown browserID, falling back to Firefox\")\n                self.driver = webdriver.Firefox()\n            self.driver.set_window_size(1280,960)\n            self.driver.implicitly_wait(15)\n            self.driver.set_page_load_timeout(15)\n\n            self.main_window = self.driver.current_window_handle\n        except Exception as e:\n            logging.error(' start driver : %s \\t\\t__from executor.py start()', str(e))\n\n    def refresh(self):\n        try:\n            self.driver.refresh()\n            self.check_after_click()\n        except Exception as e:\n            logging.error(' refresh : %s \\t\\t__from executor.py refresh()', str(e))\n\n    def close(self):\n        try:\n            for handle in self.driver.window_handles:\n                self.driver.switch_to_window(handle)\n                logging.info(\" closing: %s\", str(self.driver))\n                self.driver.quit()\n        except Exception as e:\n            logging.error(' close : %s \\t\\t__from executor.py close()', str(e))\n\n    def restart_app(self):\n        self.close()\n        self.start()\n\n    #==========================================================================================================================\n    # FIRE EVENT\n    
#==========================================================================================================================\n    def click_event_by_edge(self, edge):\n        self.switch_iframe_and_get_source( edge.get_iframe_list() )\n        self.fill_selects( edge.get_selects() )\n        self.fill_inputs_text( edge.get_inputs() )\n        self.fill_checkboxes( edge.get_checkboxes() )\n        self.fill_radios( edge.get_radios() )\n        self.fire_event( edge.get_clickable() )\n\n    def get_element_by_tag(self, element):\n        if element.get_id() and not element.get_id().startswith(DomAnalyzer.serial_prefix):\n            return self.driver.find_element_by_id( element.get_id() )\n        elif element.get_xpath():\n            return self.driver.find_element_by_xpath( element.get_xpath() )\n        else:\n            return None\n\n    def fire_event(self, clickable):\n        logging.info(' fire_event: id(%s) xpath(%s)', clickable.get_id(), clickable.get_xpath())\n        try:\n            element = self.get_element_by_tag(clickable)\n            if not element:\n                raise ValueError('No id nor xpath for a clickable')\n            element.click()\n            self.check_after_click()\n        except Exception as e:\n            logging.error(' Unknown Exception: %s in fire_event: id(%s) xpath(%s) \\t\\t__from executor.py fire_event()',str(e), clickable.get_id(), clickable.get_xpath())\n    \n    def fill_inputs_text(self, inputs):\n        for input_field in inputs:\n            try:\n                element = self.get_element_by_tag(input_field)\n                if not element:\n                    raise ValueError('No id nor xpath for an input field')\n                element.clear()\n                element.send_keys(input_field.get_value())\n                self.check_after_click()\n            except Exception as e:\n                logging.error(' Unknown Exception: %s in input: id(%s) xpath(%s) \\t\\t__from executor.py fill_inputs_text()',str(e), input_field.get_id(), input_field.get_xpath())\n    \n    def fill_selects(self, selects):\n        for select_field in selects:\n            try:\n                element = Select( self.get_element_by_tag(select_field) )\n                if not element:\n                    raise ValueError('No id nor xpath for a select field')\n                element.select_by_index( int(select_field.get_selected()) )\n                self.check_after_click()\n            except Exception as e:\n                logging.error(' Unknown Exception: %s in select: id(%s) xpath(%s) \\t\\t__from executor.py fill_selects()',str(e), select_field.get_id(), select_field.get_xpath())\n    \n    def fill_checkboxes(self, checkboxes):\n        for checkbox_field in checkboxes:\n            try:\n                checkbox_list = checkbox_field.get_checkbox_list()\n                #clear all\n                for checkbox in checkbox_list:\n                    element = self.get_element_by_tag(checkbox)\n                    if not element:\n                        raise ValueError('No id nor xpath for a checkbox')\n                    if element.is_selected():\n                        element.click()\n                        self.check_after_click()\n                for selected_id in checkbox_field.get_selected_list():\n                    selected_element = self.get_element_by_tag( checkbox_list[int(selected_id)] )\n                    if not selected_element:\n                        raise ValueError('No id nor xpath for a checkbox')\n                    selected_element.click()\n                    self.check_after_click()\n            except Exception as e:\n                logging.error(' Unknown Exception: %s in checkbox: name(%s) \\t\\t__from executor.py fill_checkboxes()'\\\n                    %( str(e), checkbox_field.get_checkbox_name() ) )\n    \n    def fill_radios(self, radios):\n        for radio_field in radios:\n            try:\n                selected_id = int(radio_field.get_selected())\n                radio_list = radio_field.get_radio_list()\n                element = self.get_element_by_tag( radio_list[selected_id] )\n                if not element:\n                    raise ValueError('No id nor xpath for a radio')\n                if not element.is_selected():\n                    element.click()\n                    self.check_after_click()\n            except Exception as e:\n                logging.error(' Unknown Exception: %s in radio: name(%s) \\t\\t__from executor.py fill_radios()'\\\n                    % ( str(e), 
radio_field.get_radio_name() ) )\n    \n    #==========================================================================================================================\n    # GO ON / BACK\n    #==========================================================================================================================\n    def goto_url(self):\n        try:\n            self.driver.get(self.startUrl)\n        except Exception as e:\n            logging.error(' driver get url : %s \\t\\t__from executor.py goto_url()', str(e))\n\n    def back_history(self):\n        print('back from',self.browserID)\n        try:\n            time.sleep(1)\n            self.driver.back()\n            self.check_after_click()\n        except Exception as e:\n            logging.error(' back : %s \\t\\t__from executor.py back_history()', str(e))\n            self.driver.get(self.startUrl)\n\n    def forward_history(self):\n        try:\n            time.sleep(1)\n            self.driver.forward()\n            self.check_after_click()\n        except Exception as e:\n            logging.error(' forward : %s \\t\\t__from executor.py forward_history()', str(e))\n\n    #==========================================================================================================================\n    # GET ELEMENT / GET INFORMATION\n    #==========================================================================================================================\n    def get_url(self):\n        try:\n            return self.driver.current_url\n        except Exception as e:\n            logging.error(' get url : %s \\t\\t__from executor.py get_url()', str(e))\n            return 'error url'\n\n    def get_source(self):\n        try:\n            text = self.driver.page_source\n        except Exception as e:\n            logging.error(' %s \\t\\t__from executor.py get_source()', str(e))\n            text = \"ERROR! cannot load file\"\n        return text.encode('utf-8')\n\n    def switch_iframe_and_get_source(self, iframe_xpath_list=None):\n        try:\n            self.driver.switch_to_default_content()\n            if iframe_xpath_list and iframe_xpath_list[0] != 'None':\n                for xpath in iframe_xpath_list:\n                    iframe = self.driver.find_element_by_xpath(xpath)\n                    self.driver.switch_to_frame(iframe)\n        except Exception as e:\n            logging.error(' switch_iframe : %s \\t\\t__from executor.py switch_iframe_and_get_source()', str(e))\n        return self.get_source()\n\n    def get_screenshot(self, file_path):\n        self.driver.get_screenshot_as_file(file_path)\n\n    def get_log(self,pathDir):\n        #save the browser console log entries into a json file\n        if not os.path.exists(pathDir):\n            os.makedirs(pathDir)\n        file_path = os.path.join(pathDir,'browser_'+str(self.browserID)+'.json')\n        url = self.get_url()\n        log_list = {'url': url,'filepath': file_path,'log':[]}\n\n        try:\n            for entry in self.driver.get_log('browser'):\n                print(entry)\n                log_list['log'].append(entry)\n        except Exception as e:\n            print(str(e))\n        with codecs.open( file_path,'w', encoding='utf-8' ) as f:\n            json.dump(log_list, f, indent=3, sort_keys=True, ensure_ascii=False)\n        print('===log record finished')\n        return log_list\n\n    def get_coor(self,pathDir):\n        #save the id/coordinate/size info of the page elements into a json file\n        print(\"===get coor\")\n        if not os.path.exists(pathDir):\n            os.makedirs(pathDir)\n        file_path = os.path.join(pathDir,'browser_'+str(self.browserID)+'.json')\n        url = self.get_url()\n        coor_list = {'url': url,'filepath': file_path,'elements':[]}\n        element_list=self.driver.find_elements_by_xpath('//*[@id]')\n        for i in element_list:\n            single_element={\n                'element_id' :str(i),\n                'tag_name' :str(i.tag_name),\n                #'x_path' :i.x_path\n                'type' :\"none\",\n                'name' :\"none\",\n                'coor':{\n                    'x' :i.location['x'],\n                    'y' :i.location['y'],\n                },\n                'size':{\n                    'height' 
:i.size['height'],\n                    'width' :i.size['width'],\n                },\n            }\n            store_single_element = False\n            #tagname = id, select, a,list\n            for j in ['id']:\n                if i.get_attribute(j)!=None and i.get_attribute(j)!=\"\":\n                    single_element['type'] = j\n                    single_element['name'] = i.get_attribute(j)\n                    store_single_element=True\n            if store_single_element == True:\n                coor_list['elements'].append(single_element)\n        \n        with codecs.open( file_path,'w', encoding='utf-8' ) as f:\n            json.dump(coor_list, f, indent=2, sort_keys=True, ensure_ascii=False)\n        print('===coor record finished')\n        return coor_list\n\n    def get_dom_list(self, configuration):\n        #save dom of iframe in list of StateDom [iframe_path_list, dom, url/src, normalize dom]\n        dom_list = []\n        new_dom = self.switch_iframe_and_get_source()\n\n        url = self.get_url()\n        soup = BeautifulSoup(new_dom, 'html5lib')\n        for frame in configuration.get_frame_tags():\n            for iframe_tag in soup.find_all(frame):\n                iframe_xpath = DomAnalyzer._get_xpath(iframe_tag)\n                iframe_src = iframe_tag['src'] if iframe_tag.has_attr('src') else None\n                try: #iframe_tag.clear() may fail, e.g. when the iframe has no src\n                    if configuration.is_dom_inside_iframe():\n                        self.get_dom_of_iframe(configuration, dom_list, [iframe_xpath], iframe_src)\n                    iframe_tag.clear()\n                except Exception as e:\n                    logging.error(' get_dom_of_iframe: %s \\t\\t__from crawler.py get_dom_list() ', str(e))\n        dom_list.append( {\n                'url' : url,\n                'dom' : str(soup),\n                'iframe_path' : None,\n            } )\n        brID=self.browserID\n\n        return dom_list, url\n\n    def get_dom_of_iframe(self, configuration, dom_list, iframe_xpath_list, src):\n        dom = self.switch_iframe_and_get_source(iframe_xpath_list)\n        soup = BeautifulSoup(dom, 'html5lib')\n        for frame in configuration.get_frame_tags():\n            for iframe_tag in soup.find_all(frame):\n                iframe_xpath = DomAnalyzer._get_xpath(iframe_tag)\n                iframe_xpath_list.append(iframe_xpath)\n                iframe_src = iframe_tag['src'] if iframe_tag.has_attr('src') else None\n                try:\n                    self.get_dom_of_iframe(configuration, dom_list, iframe_xpath_list, iframe_src)\n                    iframe_tag.clear()\n                except Exception as e:\n                    logging.error(' get_dom_of_iframe: %s \\t\\t__from crawler.py get_dom_list() ', str(e))\n        dom_list.append( {\n                'url' : src,\n                'dom' : str(soup),\n                'iframe_path' : iframe_xpath_list,\n            } )\n\n    \n\n    #==========================================================================================================================\n    # CHECK \n    #==========================================================================================================================\n    def check_after_click(self):\n        time.sleep(1)\n        self.check_alert()\n        self.check_window()\n        self.check_tab()\n        self.driver.find_element_by_xpath(\"html/body\").click()\n        time.sleep(0.1)\n\n    def check_alert(self):\n        no_alert = False\n        while not no_alert:\n            try:\n                alert = self.driver.switch_to_alert()\n                logging.info(' click with alert: %s ', alert.text)\n                alert.dismiss()\n            except Exception:\n                no_alert = True\n\n    def check_window(self):\n        if len(self.driver.window_handles) > 1:\n            logging.info(' more than one window appears')\n            for handle in self.driver.window_handles:\n                if handle != self.main_window:\n                    self.driver.switch_to_window(handle)\n                    logging.info(\" closing: %s\", str(self.driver))\n                    self.driver.close()\n            self.driver.switch_to_window(self.main_window)\n\n    def check_tab(self):\n        pass\n\n#==============================================================================================================================\n\nclass CBTExecutor(SeleniumExecutor):\n    # original executor-style initialization\n    def __init__(self, 
browserID, url):\n #choose the type of browser\n self.browserID = browserID\n #link to the url\n self.startUrl = url\n self.main_window = None\n self.detail_element_list = []\n \n\n #原本的Page 既成方式\n '''\n def __init__(self,driver_num,Driver):\n self.Driver=Driver\n self.driver=Driver.driver\n self.url=self.driver.current_url\n self.title=self.driver.title\n \n self.dom_list=self.driver.page_source \n self.type_tuple='id','class','a' \n self.element_list=[] \n\n #self.id_list=driver.find_elements_by_xpath('//*[@id]')\n #self.class_list = driver.find_elements_by_xpath('//*[@class]')\n \n \n #\n #detail_element_list['e_list']=\"QQ\"\n #detail_element_list['type_tuple']='id'\n \n self.detail_element_list = []\n '''\n \n\n def get_element_property(self):\n self.element_list=self.driver.find_elements_by_xpath('//*')\n for i in self.element_list:\n info={\n 'element' :i,\n 'tag_name' :i.tag_name,\n #'x_path' :i.x_path\n 'type_tuple' :[],\n 'name_list' :[],\n\n 'coor':{\n 'x' :i.location['x'],\n 'y' :i.location['y'],\n },\n 'size':{\n 'height' :i.size['height'],\n 'width' :i.size['width'], \n },\n }\n store_info=False\n for j in self.type_tuple:\n if i.get_attribute(j)!=None and i.get_attribute(j)!=\"\":\n info['type_tuple'].append(j)\n info['name_list'].append(i.get_attribute(j))\n store_info=True\n if store_info == True:\n self.detail_element_list.append(info)\n \n return self.detail_element_list \n\n def print_detail_element_list(self):\n for i in self.detail_element_list:\n print (\"element:\" ,i['element'])\n #print \"xpath:\" ,i['xpath'] \n for j in range(0,len(i['name_list']),1):\n print (i['type_tuple'][j],\"=\",i['name_list'][j])\n print (\"coor:\" ,i['coor']['x'] ,i['coor']['y'])\n print (\"size:\" ,i['size']['height'] ,i['size']['width'])\n \n def get_element_attribute(self):\n for i in self.detail_element_list:\n if i['coor']['true_x'].find(\"%\")!=-1:\n print (\"define by percentage :\",i['type'],'=',i['name'])\n if i['coor']['true_x'].find(\"px\")!=-1:\n print (\"define by pixel :\",i['type'],'=',i['name'])\n\n def get_element_screenshot(self):\n print('===screenshot & elementshot')\n self.driver.get_screenshot_as_file('scr/scr.png') \n for i in self.detail_element_list:\n im = Image.open('scr/scr.png')\n left = i['coor']['x']\n top = i['coor']['y']\n right = i['coor']['x'] + i['size']['width']\n bottom = i['coor']['y'] + i['size']['height']\n im = im.crop((left, top, right, bottom)) # defines crop points\n im.save('scr/'+i['name_list'][0]+'.png') # saves new cropped image\n return 0\n", "sub_path": "executor.py", "file_name": "executor.py", "file_ext": "py", "file_size_in_byte": 20762, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "abc.ABCMeta", "line_number": 27, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 29, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 33, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 37, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 41, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 45, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 49, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 73, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 73, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 75, "usage_type": "call"}, {"api_name": 
"selenium.webdriver", "line_number": 75, "usage_type": "name"}, {"api_name": "selenium.webdriver.Ie", "line_number": 77, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 77, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 80, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 80, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 87, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 94, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 100, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 103, "usage_type": "call"}, {"api_name": "dom_analyzer.DomAnalyzer.serial_prefix", "line_number": 121, "usage_type": "attribute"}, {"api_name": "dom_analyzer.DomAnalyzer", "line_number": 121, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 129, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 137, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 149, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 154, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 160, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 181, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 196, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 206, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 211, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 215, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 220, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 224, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 233, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 240, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 252, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path", "line_number": 260, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 261, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path", "line_number": 262, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 272, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 273, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path", "line_number": 280, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path", "line_number": 282, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 312, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 313, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 323, "usage_type": "call"}, {"api_name": "configuration.get_frame_tags", "line_number": 324, "usage_type": "call"}, {"api_name": "dom_analyzer.DomAnalyzer._get_xpath", "line_number": 326, "usage_type": "call"}, {"api_name": "dom_analyzer.DomAnalyzer", "line_number": 326, "usage_type": "name"}, {"api_name": "configuration.is_dom_inside_iframe", "line_number": 329, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 333, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 345, "usage_type": 
"call"}, {"api_name": "configuration.get_frame_tags", "line_number": 346, "usage_type": "call"}, {"api_name": "dom_analyzer.DomAnalyzer._get_xpath", "line_number": 348, "usage_type": "call"}, {"api_name": "dom_analyzer.DomAnalyzer", "line_number": 348, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 355, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 368, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 373, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 380, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 387, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 391, "usage_type": "call"}]} +{"seq_id": "309802396", "text": "from collections.abc import Iterable\n\n\nclass ContextManager(object):\n def __init__(self, application, values=[]):\n self.application = application\n self.values = values\n self.context = None\n\n def __enter__(self):\n ctx = self.application._enter_context()\n if not self.values:\n return ctx\n elif isinstance(self.values, str):\n return getattr(ctx, self.values)\n elif isinstance(self.values, Iterable):\n return [getattr(ctx, key) for key in self.values]\n else:\n raise AttributeError(\"Wrong argument type!\")\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.application._exit_context(exc_type, exc_value, traceback)\n", "sub_path": "sapp/context_manager.py", "file_name": "context_manager.py", "file_ext": "py", "file_size_in_byte": 735, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "collections.abc.Iterable", "line_number": 16, "usage_type": "argument"}]} +{"seq_id": "550224183", "text": "from datetime import datetime\n\nimport core.case.database as case_database\nimport core.case.subscription as case_subscription\n\n\ndef setup_subscriptions_for_step(workflow_uids, step_uids, step_events=None, workflow_events=None):\n step_events = step_events if step_events is not None else ['Function Execution Success']\n workflow_events = workflow_events if workflow_events is not None else []\n subs = {workflow_uid: workflow_events for workflow_uid in workflow_uids} \\\n if isinstance(workflow_uids, list) else {workflow_uids: workflow_events}\n for step_uid in step_uids:\n subs[step_uid] = step_events\n case_subscription.set_subscriptions({'case1': subs})\n\n\ndef executed_steps(workflow_uid, start_time, end_time):\n events = [event.as_json()\n for event in case_database.case_db.session.query(case_database.Event). 
\\\n filter(case_database.Event.originator == workflow_uid).all()]\n out = []\n for event in events:\n if start_time <= datetime.strptime(event['timestamp'], '%Y-%m-%d %H:%M:%S.%f') <= end_time:\n out.append(event)\n return out\n", "sub_path": "tests/util/case_db_help.py", "file_name": "case_db_help.py", "file_ext": "py", "file_size_in_byte": 1120, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "core.case.subscription.set_subscriptions", "line_number": 14, "usage_type": "call"}, {"api_name": "core.case.subscription", "line_number": 14, "usage_type": "name"}, {"api_name": "core.case.database.case_db.session.query", "line_number": 19, "usage_type": "call"}, {"api_name": "core.case.database.case_db", "line_number": 19, "usage_type": "attribute"}, {"api_name": "core.case.database", "line_number": 19, "usage_type": "name"}, {"api_name": "core.case.database.Event", "line_number": 19, "usage_type": "attribute"}, {"api_name": "core.case.database.Event", "line_number": 20, "usage_type": "attribute"}, {"api_name": "core.case.database", "line_number": 20, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "270159821", "text": "import os\nfrom utils import yaml_stream\nfrom sqlalchemy import Table\n\n\ndef importyaml(connection, metadata, source_path):\n\n certCerts = Table('certCerts',metadata)\n certSkills = Table('certSkills',metadata,)\n\n skillmap = {\n \"basic\": 0,\n \"standard\": 1,\n \"improved\": 2,\n \"advanced\": 3,\n \"elite\": 4\n }\n\n print(\"Importing Certificates\")\n trans = connection.begin()\n\n with open(\n os.path.join(source_path, 'fsd', 'certificates.yaml'), 'r'\n ) as yamlstream:\n for certificate in yaml_stream.read_by_any(yamlstream):\n for certificate_id, certificate_details in certificate.items():\n connection.execute(\n certCerts.insert(),\n certID=certificate_id,\n name=certificate_details.get('name', ''),\n description=certificate_details.get('description', ''),\n groupID=certificate_details.get('groupID')\n )\n for skill in certificate_details['skillTypes']:\n for skillLevel in certificate_details['skillTypes'][skill]:\n connection.execute(\n certSkills.insert(),\n certID=certificate_id,\n skillID=skill,\n certLevelInt=skillmap[skillLevel],\n certLevelText=skillLevel,\n skillLevel=certificate_details['skillTypes'][skill][skillLevel]\n )\n trans.commit()\n", "sub_path": "tableloader/tableFunctions/certificates.py", "file_name": "certificates.py", "file_ext": "py", "file_size_in_byte": 1581, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sqlalchemy.Table", "line_number": 8, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "utils.yaml_stream.read_by_any", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.yaml_stream", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "433992", "text": "# !/usr/bin/env python\n# coding=utf-8\n# author: sunshinebooming@gmail.com\n\nimport re\nimport os\nimport requests\nimport math\nimport random\nimport time\nimport datetime\nfrom bs4 import BeautifulSoup\nfrom config import *\nimport sqlite3\nfrom csv import reader\nfrom 
lib.utils.local_time import *\nfrom lib.utils.file_storage import *\nfrom lib.request.web_request_header import *\n\n\nclass web_house():\n    def __init__(self, city, xiaoqu, xiaoqu_name):\n        self.city = city\n        self.xiaoqu = xiaoqu\n        self.xiaoqu_name = xiaoqu_name\n        self.numbers = set()\n        self.price_info_list = list()\n        conn = sqlite3.connect('house.db')\n        conn.text_factory = str\n        c = conn.cursor()\n        # Create table\n        sql = 'create table if not exists ' + self.xiaoqu_name + ' ( hid integer, plan text, layer text, area float, direction text, elevator text, begin_time date, end_time date, duration integer, open_price integer, deal_price integer, by_price integer)'\n\n        c.execute(sql)\n        conn.commit()\n        conn.close()\n\n\n    def format_price_info(self, house_number, house_type, house_layer, house_area, house_direction,\n                          house_elevator, house_time, deal_date, duration, begin_price, end_price, by_price):\n        return \"{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}\\n\".\\\n            format(house_number, house_type, house_layer, house_area, house_direction,\n                   house_elevator, house_time, deal_date, duration, begin_price, end_price, by_price)\n\n    def get_raw_price_info(self, houseid, deal_date, headers):\n        if houseid in self.numbers or self.check_db(houseid):\n            return\n        if True == RANDOM_DELAY:\n            # random delay of 0-15 seconds\n            random_delay = random.randint(0, DELAY_MAX + 1)\n            print('random delay: %s S...' % (random_delay))\n            time.sleep(random_delay)\n\n        target_web = 'http://{0}.ke.com/chengjiao/{1}.html'.format(self.city, houseid)\n        print('request target web:', target_web)\n        response = requests.get(target_web, timeout=10, headers=headers)\n        html = response.content\n        soup = BeautifulSoup(html, 'lxml')\n\n        # with open(\"test4.txt\", \"r\") as f:\n        #     contents = f.read()\n        # soup = BeautifulSoup(contents, 'lxml')\n\n        try:\n            house_price = soup.find(\"div\", class_=\"info fr\")\n            house_base = soup.find(\"div\", class_=\"base\").find(\"div\", class_=\"content\")\n            house_transaction = soup.find(\"div\", class_=\"transaction\").find(\"div\", class_=\"content\")\n        except Exception as e:\n            return\n\n        try:\n            end_price = house_price.find(\"span\", class_=\"dealTotalPrice\").text.encode(\"utf-8\").decode(\"utf-8\")\n            end_price = re.findall(r'\\d+', end_price)[0].encode(\"utf-8\").decode(\"utf-8\")\n            by_price = re.findall(r'\\d+', house_price.find(\"b\").text)[0].encode(\"utf-8\").decode(\"utf-8\")\n            price_msg = house_price.find(\"div\", class_=\"msg\")\n            spans = price_msg.find_all(\"span\")\n            begin_price = re.findall(r'\\d+', spans[0].text)[0].encode(\"utf-8\").decode(\"utf-8\")\n            duration = re.findall(r'\\d+', spans[1].text)[0].encode(\"utf-8\").decode(\"utf-8\")\n        except Exception as e:\n            print(e)\n            begin_price = \"0\"\n            duration = \"0\"\n            end_price = \"0\"\n            by_price = \"0\"\n\n        try:\n            lis = house_base.find_all(\"li\")\n            for li in lis:\n                li.span.decompose()\n            house_type = lis[0].text.strip().encode(\"utf-8\").decode(\"utf-8\")\n            house_layer = lis[1].text.strip().encode(\"utf-8\").decode(\"utf-8\")\n            house_area = re.findall(r'\\d+', lis[2].text.strip())[0].encode(\"utf-8\").decode(\"utf-8\")\n            house_direction = lis[6].text.strip().encode(\"utf-8\").decode(\"utf-8\")\n            house_elevator = lis[12].text.strip().encode(\"utf-8\").decode(\"utf-8\")\n        except Exception as e:\n            house_type = \"\"\n            house_layer = \"\"\n            house_area = \"\"\n            house_direction = \"\"\n            house_elevator = \"无\"\n\n        try:\n            lis = house_transaction.find_all(\"li\")\n            for li in lis:\n                li.span.decompose()\n            house_number = lis[0].text.strip().encode(\"utf-8\").decode(\"utf-8\")\n            house_time = lis[2].text.strip().encode(\"utf-8\").decode(\"utf-8\")\n        except Exception as e:\n            house_number = \"\"\n            house_time = \"\"\n\n        # print the single price record\n        print(\"\\t===> 房号: %s, 户型: %s, 楼层: %s, 面积: %s 平米, 朝向: %s, \"\n              \"电梯: %s, 放盘时间: %s, 成交时间: %s, 周期: %s, 挂牌价: %s, 成交价: %s, 单价: %s\" % (\n                  house_number, house_type, house_layer, house_area, house_direction,\n                  house_elevator, house_time, deal_date, duration, begin_price, end_price, by_price))\n\n        # format the price record and append it to the list\n        if int(house_number) not in self.numbers:\n            self.numbers.add(int(house_number))\n            price_fmt_str = self.format_price_info(house_number, house_type, house_layer, house_area, house_direction,\n                                                   house_elevator, house_time, deal_date, duration, begin_price, end_price, by_price)\n            self.price_info_list.append(price_fmt_str)\n\n    def get_page_price_info(self, target_sub_web, headers):\n        if True == RANDOM_DELAY:\n            # random delay of 0-15 seconds\n            random_delay = random.randint(0, DELAY_MAX + 1)\n            print('random delay: %s S...' % (random_delay))\n            time.sleep(random_delay)\n\n        print('request target web:', target_sub_web)\n        response = requests.get(target_sub_web, timeout=10, headers=headers)\n        html = response.content\n        soup = BeautifulSoup(html, 'lxml')\n\n        # with open(\"test2.txt\", \"r\") as f:\n        #     contents = f.read()\n        # soup = BeautifulSoup(contents, 'lxml')\n\n        # extract the house-price entries on this page\n        house_contents = soup.find(\"ul\", class_=\"listContent\").find_all(\"li\", class_=\"VIEWDATA\")\n        for house_content in house_contents:\n            item = str(house_content.find(\"a\", class_=\"CLICKDATA maidian-detail\"))\n            num = int(re.findall(r'\\d+', item[item.find(\"fb_item_id\"):])[0])\n            deal_date = house_content.find(\"div\", \"dealDate\").text.strip()\n            dates = re.findall(r'\\d+', deal_date)\n            deal_date = datetime.date(int(dates[0]), int(dates[1]), int(dates[2]))\n            self.get_raw_price_info(num, deal_date, headers)\n\n\n    def get_price_info(self):\n        # xiaoqu = xiaoqu.encode(\"utf-8\").decode(\"utf-8\")\n\n        target_web = 'http://{0}.ke.com/chengjiao/rs{1}/'.format(self.city, self.xiaoqu)\n        print('request target web:', target_web)\n\n        # build the request headers\n        headers = create_request_headers()\n\n        # request the page (to get the total page count)\n        response = requests.get(target_web, timeout=10, headers=headers)\n        html = response.content\n        soup = BeautifulSoup(html, 'lxml')\n\n        # parse the total page count from the response\n        try:\n            page_box = soup.find_all('div', class_='page-box house-lst-page-box')[0]\n            tmp1 = str(page_box).find(\"totalPage\")\n            tmp2 = str(page_box).find(\"curPage\")\n            total_page = int(re.findall(r'\\d+', str(page_box)[tmp1: tmp2])[0])\n        except Exception as e:\n            print(\"warning: only find one page for {0}\".format(self.xiaoqu))\n            print(e)\n            total_page = 1\n\n        print('total pages:', total_page)\n        if total_page > 2:\n            total_page = 2\n        headers = create_request_headers()\n        # iterate over the listing pages\n        for i in range(1, total_page + 1):\n            # for i in range(1, total_page):\n            target_sub_web = target_web + \"pg{0}\".format(i)\n            print('request target web:', target_sub_web)\n\n            # request the page\n            self.get_page_price_info(target_sub_web, headers)\n\n\n\n    def store_price_info(self):\n        # create the data storage directory\n        root_path = get_root_path()\n        store_dir_path = root_path + \"/data/original_data/{0}\".format(self.city)\n        is_dir_exit = os.path.exists(store_dir_path)\n        if not is_dir_exit:\n            os.makedirs(store_dir_path)\n\n        # store the formatted price data in the neighborhood's CSV file\n        store_path = store_dir_path + \"/{0}.csv\".format(self.xiaoqu)\n        with open(store_path, \"w\") as fd:\n            fd.write(\"房号, 户型, 楼层, 面积, 朝向, 电梯, 放盘时间, 成交时间, 周期, 挂牌价, 成交价, 单价\\n\")\n            for price in self.price_info_list:\n                fd.write(price)\n\n\n    def fetch_old_data(self):\n        root_path = get_root_path()\n        store_dir_path = root_path + \"/data/original_data/{0}\".format(self.city)\n        is_dir_exit = os.path.exists(store_dir_path)\n        if not is_dir_exit:\n            os.makedirs(store_dir_path)\n\n        # read previously stored price data back from the neighborhood's CSV file\n        store_path = store_dir_path + \"/{0}.csv\".format(self.xiaoqu)\n        with open(store_path, \"r\") as fd:\n            csv_reader = reader(fd)\n            # Pass reader object to list() to get a list of lists\n            list_of_rows = list(csv_reader)[1:]\n            self.price_info_list += list_of_rows\n\n    def check_db(self, hid):\n        conn = sqlite3.connect('house.db')\n        conn.text_factory = str\n        c = conn.cursor()\n        sql = \"SELECT rowid FROM {0} WHERE hid = {1}\".format(self.xiaoqu_name, hid,)\n        c.execute(sql)\n        data = c.fetchall()\n\n        conn.close()\n        if len(data) == 0:\n            return False\n        return True\n\n    def store_into_db(self):\n        conn = sqlite3.connect('house.db')\n        conn.text_factory = str\n        c = conn.cursor()\n        for info in self.price_info_list:\n            info = info.strip().split(',')\n            if not self.check_db(info[0]):\n                sql = \"insert into {0} values ( {1}, '{2}', '{3}', {4}, '{5}', '{6}', '{7}', '{8}', {9}, {10}, {11}, {12})\"\\\n                    .format(self.xiaoqu_name, info[0], info[1], info[2], info[3], info[4], info[5], info[6], info[7], info[8],\n                            info[9], info[10], info[11])\n                c.execute(sql)\n\n        conn.commit()\n        conn.close()\n\n    def print_price(self, year, des, results):\n        print(\"========================================\")\n        for month, price in zip(range(1, 13), results):\n            begin = str(year) + str(month).zfill(2) + '01'\n            end = str(year) + str(month + 1).zfill(2) + '01'\n            if month == 12:\n                end = str(year + 1) + '0101'\n            res = \"0\"\n            if len(price):\n                res = str(sum(price) / len(price))\n            print(\"{0} - {1} {2} deal {3} price {4} \".format(begin, end, des, len(price), res))\n\n    def price_by_month(self, year, ptype):\n        results = []\n        results2 = []\n        results3 = []\n        conn = sqlite3.connect('house.db')\n        conn.text_factory = str\n        c = conn.cursor()\n        for month in range(1, 13):\n            begin = str(year) + str(month).zfill(2) + '01'\n            end = str(year) + str(month + 1).zfill(2) + '01'\n            if month == 12:\n                end = str(year + 1) + '0101'\n            sql = 'select * from ' + self.xiaoqu_name + ' where substr(end_time, 2,4) || substr(end_time, 7,2) || substr(end_time, 9,2) between \"' + begin + '\" and \"' + end + '\"'\n            # print(sql)\n            c.execute(sql)\n            data = c.fetchall()\n            # print(len(data))\n            price = []\n            price2 = []\n            price3 = []\n            for row in data:\n                if row[5] == '无':\n                    continue\n                if row[1].find(\"2室\") != -1:\n                    price2.append(int(row[11]))\n                if row[1].find(\"3室\") != -1:\n                    price3.append(int(row[11]))\n                price.append(int(row[11]))\n            results.append(price)\n            results2.append(price2)\n            results3.append(price3)\n        if ptype == 0:\n            self.print_price(year, \"总\", results)\n            self.print_price(year, \"2室\", results2)\n            self.print_price(year, \"3室\", results3)\n        elif ptype == 2:\n            self.print_price(year, \"2室\", results2)\n        elif ptype == 3:\n            self.print_price(year, \"3室\", results3)\n\n        conn.close()\n\n\n\nif __name__ == \"__main__\":\n    pass\n", "sub_path": "lib/request/web_house.py", "file_name": "web_house.py", "file_ext": "py", "file_size_in_byte": 12550, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sqlite3.connect", "line_number": 28, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 50, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "requests.get", "line_number":
56, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 58, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 73, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 74, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 77, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 78, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 92, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 128, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 130, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 133, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 135, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 145, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 147, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 148, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 162, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 164, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 211, "usage_type": "call"}, {"api_name": "os.path", "line_number": 211, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 213, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 218, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 224, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 237, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 267, "usage_type": "call"}]} +{"seq_id": "504574350", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass FC(nn.Module):\n def __init__(self, input_dim, dprop):\n super(FC, self).__init__()\n self.input_dim = input_dim\n self.dprop = dprop\n\n self.fc1 = nn.Linear(input_dim, 50)\n self.fc2 = nn.Linear(50, 1)\n\n # self.loss_fn = nn.NLLLoss()\n self.loss_fn = torch.nn.MSELoss()\n # self.loss_fn = lambda pred, target: torch.mean(torch.pow((target - pred), 2)) #L2 loss\n\n def forward(self, x):\n out = self.fc1(x)\n out = F.dropout(out, p=self.dprop, training=True, inplace=True)\n out = F.relu(out, inplace=True)\n out = self.fc2(out)\n # out = self.log_softmax(out)\n return out", "sub_path": "model/fc.py", "file_name": "fc.py", "file_ext": "py", "file_size_in_byte": 736, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.dropout", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 20, "usage_type": "name"}, {"api_name": 
"torch.nn.functional.relu", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "270333307", "text": "\"\"\"\nBase exchange class\n\"\"\"\nimport time\nimport socket\nimport websocket\nfrom urllib.error import URLError\n\nimport aiohttp\nimport asyncio\n\nimport json\n\n\nclass BaseExchangeAPI():\n def __init__(self):\n self._SERVER_URL = \"\"\n self.currency_symbol = \"\"\n self.exchange_name = \"\"\n self.command_names = []\n self.short_url = \"\"\n self.last_updated_time = 0\n self.update_failure_count = 0\n\n self.price_eth = None\n self.price_usd = None\n self.price_btc = None\n self.volume_usd = None\n self.volume_eth = None\n self.volume_btc = None\n self.change_24h = None\n self.eth_price_usd = None\n self.btc_price_usd = None\n\n # TODO: make this function, use it in enclaves\n async def _get_json_from_websocket(self, url, commands):\n pass\n\n async def _get_json_from_url(self, url):\n async def fetch(session, url):\n async with session.get(url) as response:\n return await response.text()\n \n headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n }\n async with aiohttp.ClientSession(headers=headers) as session:\n response = await fetch(session, url)\n\n try:\n data = json.loads(response)\n except json.decoder.JSONDecodeError:\n response = response[:2000]\n if (\"be right back\" in response\n or \"404 Not Found\" in response and \"nginx\" in response\n or \"Request unsuccessful. Incapsula incident ID\" in response):\n raise TimeoutError(\"api is down - got error page\")\n else:\n raise TimeoutError(\"api sent bad data ({})\".format(repr(response)))\n else:\n return data\n\n async def update(self, timeout=10.0):\n try:\n await self._update(timeout=timeout)\n except (websocket._exceptions.WebSocketTimeoutException,\n websocket._exceptions.WebSocketBadStatusException,\n websocket._exceptions.WebSocketAddressException,\n TimeoutError,\n ConnectionResetError,\n ConnectionRefusedError,\n socket.gaierror,\n socket.timeout,\n aiohttp.errors.ClientResponseError,\n aiohttp.errors.ServerDisconnectedError,\n URLError) as e:\n self.update_failure_count += 1\n raise TimeoutError(str(e)) from e\n except Exception:\n self.update_failure_count += 1\n raise\n else:\n self.last_updated_time = time.time()\n self.update_failure_count = 0\n\n def print_all_values(self):\n print(self.exchange_name, self.currency_symbol, 'price_eth ', self.price_eth)\n print(self.exchange_name, self.currency_symbol, 'price_btc ', self.price_btc)\n print(self.exchange_name, self.currency_symbol, 'price_usd ', self.price_usd)\n print(self.exchange_name, self.currency_symbol, 'volume_usd ', self.volume_usd)\n print(self.exchange_name, self.currency_symbol, 'volume_eth ', self.volume_eth)\n print(self.exchange_name, self.currency_symbol, 'volume_btc ', self.volume_btc)\n print(self.exchange_name, self.currency_symbol, 'change_24h ', self.change_24h)\n print(self.exchange_name, self.currency_symbol, 'eth_price_usd', self.eth_price_usd)\n print(self.exchange_name, self.currency_symbol, 'btc_price_usd', self.btc_price_usd)\n\n def load_once_and_print_values(self):\n async def load_once():\n await self.update()\n loop = asyncio.get_event_loop()\n loop.run_until_complete(load_once())\n self.print_all_values()\n", "sub_path": "exchanges/base_exchange.py", "file_name": "base_exchange.py", "file_ext": "py", "file_size_in_byte": 3828, "program_lang": "python", 
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "aiohttp.ClientSession", "line_number": 47, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 52, "usage_type": "attribute"}, {"api_name": "websocket._exceptions", "line_number": 66, "usage_type": "attribute"}, {"api_name": "websocket._exceptions", "line_number": 67, "usage_type": "attribute"}, {"api_name": "websocket._exceptions", "line_number": 68, "usage_type": "attribute"}, {"api_name": "socket.gaierror", "line_number": 72, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 73, "usage_type": "attribute"}, {"api_name": "aiohttp.errors", "line_number": 74, "usage_type": "attribute"}, {"api_name": "aiohttp.errors", "line_number": 75, "usage_type": "attribute"}, {"api_name": "urllib.error.URLError", "line_number": 76, "usage_type": "name"}, {"api_name": "time.time", "line_number": 83, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "602390375", "text": "# Copyright (C) 2016 Nicira, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport random\nimport subprocess\n\nimport ovs.vlog\nfrom ovn_k8s.common import variables\n\nvlog = ovs.vlog.Vlog(\"util\")\n\n\ndef call_popen(cmd):\n child = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n output = child.communicate()\n if child.returncode:\n raise RuntimeError(\"Fatal error executing %s\" % (cmd))\n if len(output) == 0 or output[0] is None:\n output = \"\"\n else:\n output = output[0].strip()\n return output\n\n\ndef call_prog(prog, args_list):\n cmd = [prog, \"--timeout=5\", \"-vconsole:off\"] + args_list\n return call_popen(cmd)\n\n\ndef ovs_vsctl(*args):\n return call_prog(\"ovs-vsctl\", list(args))\n\n\ndef ovn_nbctl(*args):\n args_list = list(args)\n database_option = \"%s=%s\" % (\"--db\", variables.OVN_NB)\n args_list.insert(0, database_option)\n return call_prog(\"ovn-nbctl\", args_list)\n\n\ndef generate_mac(prefix=\"00:00:00\"):\n random.seed()\n mac = \"%s:%02X:%02X:%02X\" % (\n prefix,\n random.randint(0, 255),\n random.randint(0, 255),\n random.randint(0, 255))\n return mac\n\n\ndef process_stream(data_stream, event_callback):\n # StopIteration should be caught in the routine that sets up the stream\n # and reconnects it\n line = next(data_stream)\n if not line:\n return\n\n try:\n event_callback(json.loads(line))\n except ValueError:\n vlog.warn(\"Invalid JSON data from response stream:%s\" % line)\n", "sub_path": "ovn_k8s/common/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 2003, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "ovs.vlog.vlog.Vlog", "line_number": 22, "usage_type": "call"}, {"api_name": "ovs.vlog.vlog", "line_number": 22, "usage_type": "attribute"}, {"api_name": "ovs.vlog", "line_number": 22, "usage_type": "name"}, 
{"api_name": "subprocess.Popen", "line_number": 26, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ovn_k8s.common.variables.OVN_NB", "line_number": 48, "usage_type": "attribute"}, {"api_name": "ovn_k8s.common.variables", "line_number": 48, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 54, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 58, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 59, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "308458605", "text": "import cv2\nfrom typing import List\nfrom pathlib import Path\nimport os\n\nfront_rgb_images_path = Path(\"/home/michael/Desktop/projects/ROAR/opencv_object_tracking/data/front_rgb\")\npaths: List[Path] = sorted(Path(front_rgb_images_path).iterdir(), key=os.path.getmtime)\n\n\ntensorflowNet = cv2.dnn.readNetFromTensorflow('faster_rcnn_inception_v2/faster_rcnn_inception_v2_coco_2018_01_28.pb',\n 'faster_rcnn_inception_v2/faster_rcnn_inception_v2_coco_2018_01_28.pbtxt')\ntensorflowNet.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\ntensorflowNet.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n\nfor img_path in paths:\n image = cv2.imread(img_path.as_posix())\n\n rows, cols, channels = image.shape\n\n # Use the given image as input, which needs to be blob(s).\n tensorflowNet.setInput(cv2.dnn.blobFromImage(image, size=(300, 300), swapRB=True, crop=False))\n\n # Runs a forward pass to compute the net output\n networkOutput = tensorflowNet.forward()\n\n # Loop on the outputs\n for detection in networkOutput[0, 0]:\n\n score = float(detection[2])\n if score > 0.5:\n left = detection[3] * cols\n top = detection[4] * rows\n right = detection[5] * cols\n bottom = detection[6] * rows\n area = (right - left) * (bottom - top)\n\n # draw a red rectangle around detected objects\n if area < 10000:\n cv2.rectangle(image, (int(left), int(top)), (int(right), int(bottom)), (0, 0, 255), thickness=2)\n\n # Show the image with a rectagle surrounding the detected objects\n cv2.imshow('Image', image)\n\n key = cv2.waitKey(2) & 0xFF\n if key == ord(\"s\"):\n pass\n\n elif key == ord(\"q\"):\n break\n\n", "sub_path": "faster_rcnn_multi.py", "file_name": "faster_rcnn_multi.py", "file_ext": "py", "file_size_in_byte": 1707, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pathlib.Path", "line_number": 6, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 7, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 7, "usage_type": "name"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.dnn.readNetFromTensorflow", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.dnn", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.dnn", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.dnn.blobFromImage", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 44, 
"usage_type": "call"}]} +{"seq_id": "90973181", "text": "import pprint\n\nimport cherrypy\nfrom openid.consumer.consumer import Consumer\nfrom openid.store.filestore import FileOpenIDStore\nfrom openid.extensions.sreg import SRegRequest, SRegResponse\nfrom openid.extensions.ax import FetchRequest, FetchResponse, AttrInfo\n\nfrom lib.applicationpaths import ApplicationPaths\nfrom model.openidaccount import OpenIdAccount\nfrom model.account import Account\n\nclass OpenIdHelper(object):\n \n @classmethod\n def get_auth_redirect_url(cls, identifier):\n s = FileOpenIDStore(cherrypy.request.app.config['appSettings']['openIdDataStoreDirectory'])\n consumer = Consumer(session=cherrypy.session, store=s)\n #consumer.setAssociationPreference([('HMAC-SHA256', 'DH-SHA256')])\n authRequest = consumer.begin(identifier)\n \n # sreg\n authRequest.addExtension(SRegRequest(required=['email'], optional=['fullname']))\n \n # ax\n axFetchRequest = FetchRequest()\n axFetchRequest.add(AttrInfo(type_uri='http://axschema.org/contact/email', required=True))\n axFetchRequest.add(AttrInfo(type_uri='http://axschema.org/namePerson', required=True))\n axFetchRequest.add(AttrInfo(type_uri='http://axschema.org/namePerson/first', required=True))\n axFetchRequest.add(AttrInfo(type_uri='http://axschema.org/namePerson/last', required=True))\n authRequest.addExtension(axFetchRequest)\n \n return authRequest.redirectURL(\n realm=ApplicationPaths.get_site_root(),\n return_to=ApplicationPaths.get_handle_openid_auth_response_path())\n \n @classmethod\n def handle_auth_response(cls, query):\n #pprint.pprint(query)\n \n s = FileOpenIDStore(cherrypy.request.app.config['appSettings']['openIdDataStoreDirectory'])\n consumer = Consumer(session=cherrypy.session, store=s)\n response = consumer.complete(\n query=query,\n current_url=ApplicationPaths.get_handle_openid_auth_response_path())\n \n if 'success' == response.status:\n sregResponse = SRegResponse.fromSuccessResponse(response)\n axResponse = FetchResponse.fromSuccessResponse(response)\n \n email = None\n if sregResponse:\n email = sregResponse.get('email')\n elif axResponse:\n email = axResponse.data['http://axschema.org/contact/email'][0]\n \n if not OpenIdAccount.exists(identifier=response.identity_url):\n # start a transaction to enclose creation of the OpenID account\n # record and any other database records that need to be created\n # at the same time \n OpenIdAccount.create(identifier=response.identity_url, email=email)\n \n cherrypy.session['account-id'] = Account.get_account_id_by_email(email)\n \n raise cherrypy.HTTPRedirect(cls.__get_return_to_path())\n elif 'cancel' == response.status:\n raise cherrypy.HTTPRedirect('/error/openid?reason=cancelled')\n else:\n print('{0} {1}'.format(response.status, response.message))\n raise cherrypy.HTTPRedirect('/error/openid')\n \n @classmethod\n def __get_return_to_path(cls):\n o = '/'\n \n if cherrypy.session.has_key('return-to-after-login'):\n o = cherrypy.session.get('return-to-after-login')\n \n return o\n ", "sub_path": "src/www/lib/openidhelper.py", "file_name": "openidhelper.py", "file_ext": "py", "file_size_in_byte": 3479, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "openid.store.filestore.FileOpenIDStore", "line_number": 17, "usage_type": "call"}, {"api_name": "cherrypy.request", "line_number": 17, "usage_type": "attribute"}, {"api_name": "openid.consumer.consumer.Consumer", "line_number": 18, "usage_type": "call"}, {"api_name": "cherrypy.session", 
"line_number": 18, "usage_type": "attribute"}, {"api_name": "openid.extensions.sreg.SRegRequest", "line_number": 23, "usage_type": "call"}, {"api_name": "openid.extensions.ax.FetchRequest", "line_number": 26, "usage_type": "call"}, {"api_name": "openid.extensions.ax.AttrInfo", "line_number": 27, "usage_type": "call"}, {"api_name": "openid.extensions.ax.AttrInfo", "line_number": 28, "usage_type": "call"}, {"api_name": "openid.extensions.ax.AttrInfo", "line_number": 29, "usage_type": "call"}, {"api_name": "openid.extensions.ax.AttrInfo", "line_number": 30, "usage_type": "call"}, {"api_name": "lib.applicationpaths.ApplicationPaths.get_site_root", "line_number": 34, "usage_type": "call"}, {"api_name": "lib.applicationpaths.ApplicationPaths", "line_number": 34, "usage_type": "name"}, {"api_name": "lib.applicationpaths.ApplicationPaths.get_handle_openid_auth_response_path", "line_number": 35, "usage_type": "call"}, {"api_name": "lib.applicationpaths.ApplicationPaths", "line_number": 35, "usage_type": "name"}, {"api_name": "openid.store.filestore.FileOpenIDStore", "line_number": 41, "usage_type": "call"}, {"api_name": "cherrypy.request", "line_number": 41, "usage_type": "attribute"}, {"api_name": "openid.consumer.consumer.Consumer", "line_number": 42, "usage_type": "call"}, {"api_name": "cherrypy.session", "line_number": 42, "usage_type": "attribute"}, {"api_name": "lib.applicationpaths.ApplicationPaths.get_handle_openid_auth_response_path", "line_number": 45, "usage_type": "call"}, {"api_name": "lib.applicationpaths.ApplicationPaths", "line_number": 45, "usage_type": "name"}, {"api_name": "openid.extensions.sreg.SRegResponse.fromSuccessResponse", "line_number": 48, "usage_type": "call"}, {"api_name": "openid.extensions.sreg.SRegResponse", "line_number": 48, "usage_type": "name"}, {"api_name": "openid.extensions.ax.FetchResponse.fromSuccessResponse", "line_number": 49, "usage_type": "call"}, {"api_name": "openid.extensions.ax.FetchResponse", "line_number": 49, "usage_type": "name"}, {"api_name": "model.openidaccount.OpenIdAccount.exists", "line_number": 57, "usage_type": "call"}, {"api_name": "model.openidaccount.OpenIdAccount", "line_number": 57, "usage_type": "name"}, {"api_name": "model.openidaccount.OpenIdAccount.create", "line_number": 61, "usage_type": "call"}, {"api_name": "model.openidaccount.OpenIdAccount", "line_number": 61, "usage_type": "name"}, {"api_name": "cherrypy.session", "line_number": 63, "usage_type": "attribute"}, {"api_name": "model.account.Account.get_account_id_by_email", "line_number": 63, "usage_type": "call"}, {"api_name": "model.account.Account", "line_number": 63, "usage_type": "name"}, {"api_name": "cherrypy.HTTPRedirect", "line_number": 65, "usage_type": "call"}, {"api_name": "cherrypy.HTTPRedirect", "line_number": 67, "usage_type": "call"}, {"api_name": "cherrypy.HTTPRedirect", "line_number": 70, "usage_type": "call"}, {"api_name": "cherrypy.session.has_key", "line_number": 76, "usage_type": "call"}, {"api_name": "cherrypy.session", "line_number": 76, "usage_type": "attribute"}, {"api_name": "cherrypy.session.get", "line_number": 77, "usage_type": "call"}, {"api_name": "cherrypy.session", "line_number": 77, "usage_type": "attribute"}]} +{"seq_id": "225340543", "text": "from django.shortcuts import render\nfrom django.views.generic import ListView\nfrom .models import Assistance\n\n# Create your views here.\n\nclass assists_list(ListView):\n model = Assistance\n template_name = \"/assists/assists_list.html\"\ndef show_assists(request):\n assists_list = 
Assistance.objects.all()\n context = {\n \"assists_list\": assists_list\n }\n return render(request, \"assists/assists_list.html\", context)\n\n\ndef assists_detail(request, id):\n assist = Assistance.objects.get(id=id)\n contracts = assist.contract_set.all()\n factors = assist.factor_set.all()\n context = {\n \"assist\":assist,\n \"contracts\":contracts,\n \"factors\":factors,\n }\n return render(request, \"assist_details.html\", context)\n", "sub_path": "assists/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.views.generic.ListView", "line_number": 7, "usage_type": "name"}, {"api_name": "models.Assistance", "line_number": 8, "usage_type": "name"}, {"api_name": "models.Assistance.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Assistance.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.Assistance", "line_number": 11, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Assistance.objects.get", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Assistance.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.Assistance", "line_number": 19, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "220526075", "text": "# Requires native libraries\r\nfrom datetime import datetime, timedelta\r\nfrom json import dumps, loads\r\nfrom urllib import error, request\r\n\r\n# Requires 3rd-party libraries\r\nfrom dateutil import parser, tz\r\n\r\n# Requires supplement Vainglory API libraries\r\nfrom VaingloryEvents import Events\r\nfrom VaingloryMatch import Match, MatchDetails\r\nfrom VaingloryPlayer import Player\r\n\r\n\r\nclass VaingloryAPI:\r\n\r\n def __init__(self, APIKey=None):\r\n if APIKey is None:\r\n self._headers = {\"Authorization\": \"[redacted]\",\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\",\r\n \"X-TITLE-ID\": \"semc-vainglory\",\r\n \"Accept\": \"application/vnd.api+json\"}\r\n else:\r\n if type(APIKey) != str:\r\n raise ValueError(\"VaingloryAPI Constructor Error: Invalid developer key.\")\r\n\r\n self._headers = {\"Authorization\": APIKey,\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\",\r\n \"X-TITLE-ID\": \"semc-vainglory\",\r\n \"Accept\": \"application/vnd.api+json\"}\r\n\r\n def SearchPlayers(self, names):\r\n if (type(names) == list) or (type(names) == tuple):\r\n names = \",\".join(names)\r\n elif type(names) != str:\r\n raise ValueError(\"VaingloryAPI.SearchPlayers Error: Argument type mismatch.\")\r\n\r\n try:\r\n with request.urlopen(request.Request(\"https://api.dc01.gamelockerapp.com/shards/na/players?filter[playerNames]=\" + names, headers=self._headers)) as response:\r\n players = list()\r\n for info in loads(response.read())[\"data\"]:\r\n players.append(Player([info[\"id\"], info[\"attributes\"][\"name\"],\r\n info[\"attributes\"][\"stats\"][\"level\"],\r\n info[\"attributes\"][\"stats\"][\"skillTier\"],\r\n info[\"attributes\"][\"stats\"][\"played_ranked\"],\r\n info[\"attributes\"][\"stats\"][\"wins\"],\r\n info[\"attributes\"][\"stats\"][\"played\"] -\r\n info[\"attributes\"][\"stats\"][\"wins\"],\r\n info[\"attributes\"][\"stats\"][\"winStreak\"],\r\n info[\"attributes\"][\"stats\"][\"lossStreak\"]]))\r\n return tuple(players)\r\n\r\n 
except error.HTTPError as requestError:\r\n if requestError.code == 404:\r\n print(\"Server error: NOT_FOUND. No result for search.\")\r\n elif requestError.code == 429:\r\n print(\"Server error: TOO_MANY_REQUESTS. Search limit reached.\")\r\n else:\r\n print(requestError)\r\n return None\r\n\r\n def GetPlayerInfo(self, ID):\r\n if type(ID) != str:\r\n raise ValueError(\"VaingloryAPI.GetPlayerInfo Error: Argument type mismatch.\")\r\n\r\n try:\r\n with request.urlopen(request.Request(\"https://api.dc01.gamelockerapp.com/shards/na/players/\" + ID, headers=self._headers)) as response:\r\n info = loads(response.read())[\"data\"]\r\n return Player([info[\"id\"], info[\"attributes\"][\"name\"],\r\n info[\"attributes\"][\"stats\"][\"level\"],\r\n info[\"attributes\"][\"stats\"][\"skillTier\"],\r\n info[\"attributes\"][\"stats\"][\"played_ranked\"],\r\n info[\"attributes\"][\"stats\"][\"wins\"],\r\n info[\"attributes\"][\"stats\"][\"played\"] -\r\n info[\"attributes\"][\"stats\"][\"wins\"],\r\n info[\"attributes\"][\"stats\"][\"winStreak\"],\r\n info[\"attributes\"][\"stats\"][\"lossStreak\"]])\r\n\r\n except error.HTTPError as requestError:\r\n if requestError.code == 404:\r\n print(\"Server error: NOT_FOUND. No result for search.\")\r\n elif requestError.code == 429:\r\n print(\"Server error: TOO_MANY_REQUESTS. Request limit reached.\")\r\n else:\r\n print(requestError)\r\n return None\r\n\r\n def SearchMatches(self, players, startDateTime=None, endDateTime=None):\r\n if type(players) == Player:\r\n players = players.Name()\r\n elif (type(players) == list) or (type(players) == tuple):\r\n players = \",\".join([player.Name() for player in players])\r\n else:\r\n raise ValueError(\"VaingloryAPI.SearchMatches Error: Argument type mismatch.\")\r\n\r\n if startDateTime is not None:\r\n if type(startDateTime) == str:\r\n startDateTime = parser.parse(startDateTime)\r\n elif type(startDateTime) != datetime:\r\n raise ValueError(\"VaingloryAPI.SearchMatches Error: Argument type mismatch.\")\r\n else:\r\n startDateTime = datetime.now() - timedelta(days=7)\r\n\r\n if endDateTime is not None:\r\n if type(endDateTime) == str:\r\n endDateTime = parser.parse(endDateTime)\r\n elif type(endDateTime) != datetime:\r\n raise ValueError(\"VaingloryAPI.SearchMatches Error: Argument type mismatch.\")\r\n else:\r\n endDateTime = datetime.now()\r\n\r\n startDateTime = startDateTime.replace(tzinfo=tz.tzlocal(), microsecond=0).astimezone(tz.tzutc()).isoformat()[:19] + \"Z\"\r\n endDateTime = endDateTime.replace(tzinfo=tz.tzlocal(), microsecond=0).astimezone(tz.tzutc()).isoformat()[:19] + \"Z\"\r\n try:\r\n with request.urlopen(request.Request(\"https://api.dc01.gamelockerapp.com/shards/na/matches?sort=-createdAt&filter[createdAt-start]=\" + startDateTime + \"&filter[createdAt-end]=\" + endDateTime + \"&filter[playerNames]=\" + players, headers=self._headers)) as response:\r\n JSON = loads(response.read())\r\n matches = list()\r\n for info in JSON[\"data\"]:\r\n matches.append(Match([info[\"id\"],\r\n parser.parse(info[\"attributes\"][\"createdAt\"]).replace(tzinfo=tz.tzutc()),\r\n info[\"attributes\"][\"duration\"],\r\n \"Standard\" if info[\"attributes\"][\"gameMode\"].find(\"_\") == -1 else info[\"attributes\"][\"gameMode\"].split(\"_\")[0].capitalize(),\r\n info[\"attributes\"][\"gameMode\"].find(\"ranked\") != -1,\r\n info[\"attributes\"][\"stats\"][\"endGameReason\"]]))\r\n while \"next\" in JSON[\"links\"]:\r\n with request.urlopen(request.Request(JSON[\"links\"][\"next\"], headers=self._headers)) as next_page:\r\n 
JSON = loads(next_page.read())\r\n for info in JSON[\"data\"]:\r\n matches.append(Match([info[\"id\"],\r\n parser.parse(info[\"attributes\"][\"createdAt\"]).replace(tzinfo=tz.tzutc()),\r\n info[\"attributes\"][\"duration\"],\r\n \"Standard\" if info[\"attributes\"][\"gameMode\"].find(\"_\") == -1 else info[\"attributes\"][\"gameMode\"].split(\"_\")[0].capitalize(),\r\n info[\"attributes\"][\"gameMode\"].find(\"ranked\") != -1,\r\n info[\"attributes\"][\"stats\"][\"endGameReason\"]]))\r\n return matches\r\n\r\n except error.HTTPError as requestError:\r\n if requestError.code == 400:\r\n print(\"Server error: BAD_REQUEST. Check player name and date range.\")\r\n elif requestError.code == 404:\r\n print(\"Server error: NOT_FOUND. No result for search.\")\r\n elif requestError.code == 429:\r\n print(\"Server error: TOO_MANY_REQUESTS. Search limit reached.\")\r\n else:\r\n print(requestError)\r\n return None\r\n\r\n def GetMatchInfo(self, match):\r\n if type(match) == Match:\r\n match = match.ID()\r\n else:\r\n raise ValueError(\"VaingloryAPI.GetMatchInfo Error: Argument type mismatch.\")\r\n\r\n try:\r\n with request.urlopen(request.Request(\"https://api.dc01.gamelockerapp.com/shards/na/matches/\" + match, headers=self._headers)) as response:\r\n rosters, participants, players = dict(), dict(), dict()\r\n telemetryURL = None\r\n for JSON in loads(response.read())[\"included\"]:\r\n if JSON[\"type\"] == \"roster\":\r\n rosters[JSON[\"id\"]] = JSON\r\n elif JSON[\"type\"] == \"participant\":\r\n participants[JSON[\"id\"]] = JSON\r\n elif JSON[\"type\"] == \"player\":\r\n players[JSON[\"id\"]] = JSON\r\n elif JSON[\"type\"] == \"asset\":\r\n telemetryURL = JSON[\"attributes\"][\"URL\"]\r\n return MatchDetails(rosters, participants, players), telemetryURL\r\n\r\n except error.HTTPError as requestError:\r\n if requestError.code == 404:\r\n print(\"Server error: NOT_FOUND. No result for search.\")\r\n elif requestError.code == 429:\r\n print(\"Server error: TOO_MANY_REQUESTS. Request limit reached.\")\r\n else:\r\n print(requestError)\r\n return None\r\n\r\n def GetTelemetry(self, URL):\r\n try:\r\n with request.urlopen(request.Request(URL, headers={\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\", \"Accept\": \"application/json\"})) as telemetry:\r\n return Events(loads(telemetry.read()))\r\n\r\n except error.HTTPError as requestError:\r\n if requestError.code == 400:\r\n print(\"Server error: BAD_REQUEST. Error processing JSON.\")\r\n elif requestError.code == 404:\r\n print(\"Server error: NOT_FOUND. No result for search.\")\r\n elif requestError.code == 429:\r\n print(\"Server error: TOO_MANY_REQUESTS. 
Search limit reached.\")\r\n else:\r\n print(requestError)\r\n return None\r\n", "sub_path": "VaingloryAPI.py", "file_name": "VaingloryAPI.py", "file_ext": "py", "file_size_in_byte": 10341, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "urllib.request.urlopen", "line_number": 39, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 39, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 39, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "VaingloryPlayer.Player", "line_number": 42, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 53, "usage_type": "attribute"}, {"api_name": "urllib.error", "line_number": 53, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 67, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 67, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 67, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 68, "usage_type": "call"}, {"api_name": "VaingloryPlayer.Player", "line_number": 69, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 79, "usage_type": "attribute"}, {"api_name": "urllib.error", "line_number": 79, "usage_type": "name"}, {"api_name": "VaingloryPlayer.Player", "line_number": 89, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 98, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 98, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 99, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 102, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 102, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 102, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 106, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 106, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 107, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 110, "usage_type": "name"}, {"api_name": "dateutil.tz.tzlocal", "line_number": 112, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 112, "usage_type": "name"}, {"api_name": "dateutil.tz.tzutc", "line_number": 112, "usage_type": "call"}, {"api_name": "dateutil.tz.tzlocal", "line_number": 113, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 113, "usage_type": "name"}, {"api_name": "dateutil.tz.tzutc", "line_number": 113, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 115, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 115, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 115, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 116, "usage_type": "call"}, {"api_name": "VaingloryMatch.Match", "line_number": 119, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 120, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 120, "usage_type": "name"}, {"api_name": "dateutil.tz.tzutc", "line_number": 120, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 120, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 126, "usage_type": "call"}, 
{"api_name": "urllib.request", "line_number": 126, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 126, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 127, "usage_type": "call"}, {"api_name": "VaingloryMatch.Match", "line_number": 129, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 130, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 130, "usage_type": "name"}, {"api_name": "dateutil.tz.tzutc", "line_number": 130, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 130, "usage_type": "name"}, {"api_name": "urllib.error.HTTPError", "line_number": 137, "usage_type": "attribute"}, {"api_name": "urllib.error", "line_number": 137, "usage_type": "name"}, {"api_name": "VaingloryMatch.Match", "line_number": 149, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 155, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 155, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 155, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 158, "usage_type": "call"}, {"api_name": "VaingloryMatch.MatchDetails", "line_number": 167, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 169, "usage_type": "attribute"}, {"api_name": "urllib.error", "line_number": 169, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 180, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 180, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 180, "usage_type": "call"}, {"api_name": "VaingloryEvents.Events", "line_number": 181, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 181, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 183, "usage_type": "attribute"}, {"api_name": "urllib.error", "line_number": 183, "usage_type": "name"}]} +{"seq_id": "161872648", "text": "from baseclient import BaseClient \nimport requests \nimport json \n \n \n \n \nclass IdFromUsername(BaseClient): \n BASE_URL = 'https://api.vk.com/method/users.get' \n http_method = 'GET' \n \n \n def __init__(self, name): \n self.name = name \n \n \n def get_params(self): \n return 'user_ids=' + self.name \n \n \n def response_handler(self, response): \n try: \n uobj = json.loads(response.text) \n return uobj.get('response')[0].get('uid') \n except: \n raise Exception(\"Couldn't handle response for username {}\".format(self.name)) \n \n \n def _get_data(self, method, http_method): \n response = None \n response = requests.get(self.BASE_URL + '?' 
+ self.get_params()) \n return self.response_handler(response) ", "sub_path": "Lab 3/id_from_username.py", "file_name": "id_from_username.py", "file_ext": "py", "file_size_in_byte": 786, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "baseclient.BaseClient", "line_number": 8, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "538965064", "text": "import scrapy\nimport requests \nimport time\nfrom .item import CustomProxyItem\n\nclass CustomProxy(scrapy.Spider):\n name = 'xicispider'\n urls = []\n user_agent = \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11\"\n \n for i in range(1,9):\n url = r'http://www.xicidaili.com/nn/%s' % str(i)\n urls.append(url)\n\n def start_requests(self):\n headers = {'User-Agent': self.user_agent}\n for url in self.urls:\n yield scrapy.Request(url=url, headers=headers, callback=self.parse)\n \n def parse(self, response):\n item = CustomProxyItem()\n all_list = response.css('table#ip_list tr')\n for i in range(1, len(all_list)):\n address = all_list[i].css('td::text').extract()[0]\n port = all_list[i].css('td::text').extract()[1]\n types = all_list[i].css('td::text').extract()[5]\n\n if self.filter(address, port, types) == False:\n continue\n\n item['proxy_address'] = address\n item['proxy_port'] = port\n item['proxy_type'] = types\n\n yield item\n\n def filter(self, address, port, types):\n if types == 'HTTP':\n return False #proxy = {'http': 'http://%s:%s' % (address, port)}\n elif types == 'HTTPS':\n proxy = {'https': 'http://%s:%s' % (address, port)}\n else:\n return False\n\n try:\n if requests.get('https://movie.douban.com/top250', proxies=proxy, timeout=0.5).status_code == 200:\n return True\n else:\n return False\n except:\n return False", "sub_path": "customProxy/customProxy/spiders/customProxy.py", "file_name": "customProxy.py", "file_ext": "py", "file_size_in_byte": 1677, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "scrapy.Spider", "line_number": 6, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 18, "usage_type": "call"}, {"api_name": "item.CustomProxyItem", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "561460971", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nX=np.random.rand(100,1)\nX_b=np.c_[np.ones((100,1)),X]\nY=5+7*X+np.random.randn(100,1)\n\n\nflag=False\nn_epochs=2000\nm=100\nlearn_rate=0.1\nt0,t1=10,50\nmini_bat=5\ndef learning_schedule(t):\n return t0/(t1+t)\n\ntheta=np.random.randn(2,1)\n\nfor epoch in range(n_epochs):\n for j in range(m):\n #random line\n random_index=np.random.randint(m-mini_bat)\n xi=X_b[random_index:random_index+mini_bat]\n yi=Y[random_index:random_index+mini_bat]\n gradient=xi.T.dot(xi.dot(theta)-yi)/mini_bat\n\n learn_rate=j+epoch*m\n learn_rate=learning_schedule(learn_rate)\n theta=theta-gradient*learn_rate\n\nprint(theta)\nprint(flag)\n\nX_test=np.array([[0],[1]])\nX_test_b=np.c_[np.ones((2,1)),X_test]\nY_test=X_test_b.dot(theta)\nplt.plot(X,Y,'b.')\nplt.plot(X_test,Y_test,'r-')\nplt.axis([0,1,0,15])\nplt.show()", "sub_path": "random_gradients_descent.py", "file_name": "random_gradients_descent.py", "file_ext": "py", "file_size_in_byte": 879, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.random.rand", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 4, "usage_type": "attribute"}, {"api_name": "numpy.c_", "line_number": 5, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 6, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "9954150", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nPart of the fieldrecorder python modules written to handle sychronised audio-video recordings \nwith ASIO based soundcards and the TeAx ThermalCapture cameras. \n\nThis version is built with the express purpose of making recordings for the series of Phyllo experiments carried out \nat the lab over the summer of 2020. \n\nThe output of the recordings need have the following features:\n\n1. Direct disk saving of the data as it is captured from device. \n2. Recordings are triggered by a keyboard touch, and cannot be stopped until the designated time is over. \n All recordings need to of a fixed length +/- some time (few seconds offset on purpose, to allow easy\n correspondence matching between audio and video files).\n3. Every file will have a unique counter number, with the counter number increasing by 1 per recording - this number is dependent on the \ncomputer which is running the whole setup. \n\n\n@author: Thejasvi Beleyur, August 2020\nCode released under an MIT License\n\"\"\"\nimport os\nimport Queue\nimport datetime as dt\nimport time\nimport numpy as np\nimport pandas as pd\nimport sounddevice as sd\nfrom scipy import signal\nimport soundfile as sf\nimport matplotlib.pyplot as plt\nplt.rcParams['agg.path.chunksize'] = 10000\nfrom pynput.keyboard import Listener\n\n\n\nclass fieldrecorder_phyllo():\n\n def __init__(self,rec_durn,device_name=None,input_output_chs=(2,2),target_dir = '~\\\\Desktop\\\\',**kwargs):\n '''\n\n Inputs:\n rec_durn : float. duration of the whole session in seconds\n device_name : string. name of the device as shown by sd.query_devices()\n Defaults to None - which will throw an error if there are not at\n least 3 output channels\n\n input_output_chs: tuple with integers. Number of channels for recording and playback.\n\n target_dir : file path. 
place where the output WAV files will be saved\r\n\r\n **kwargs:\r\n exclude_channels: list with integers. These channels will not be saved\r\n into the WAV file. Defaults to the digital channels\r\n in the double Fireface UC setup\r\n\r\n one_recording_duration: float. Duration of one triggered recording in seconds. Defaults to 300.\r\n one_recording_pm: array-like. Pool of extra durations in seconds; one value is drawn at random and added to each recording so file lengths vary slightly. Defaults to np.arange(0,5,0.25).\r\n\r\n '''\r\n self.rec_durn = rec_durn\r\n self.press_count = 0\r\n self.start_recording = False\r\n self.sync_freq = 25\r\n self.device_name = device_name\r\n self.input_output_chs = input_output_chs\r\n self.target_dir = target_dir\r\n \r\n self.one_recording_duration = kwargs.get('one_recording_duration',300) # seconds\r\n self.one_recording_pm = kwargs.get('one_recording_pm', np.arange(0,5,0.25)) # the additional range with which all recordings are expected to vary.\r\n try:\r\n self.counter_file = kwargs['counter_file']\r\n except:\r\n raise ValueError('The path of the counter file has not been declared!!')\r\n\r\n if self.device_name is None:\r\n self.tgt_ind = None\r\n else:\r\n self.get_device_indexnumber(self.device_name)\r\n\r\n try:\r\n expanded_path = os.path.expanduser(target_dir)\r\n os.chdir(expanded_path)\r\n except:\r\n raise ValueError('Unable to find the target directory: ' + target_dir)\r\n\r\n self.all_recchannels = range(self.input_output_chs[0])\r\n \r\n self.exclude_channels = kwargs.get('exclude_channels', [])\r\n\r\n self.save_channels = list(set(self.all_recchannels) - set(self.exclude_channels))\r\n\r\n def thermoacousticpy(self):\r\n '''\r\n Performs the synchronised recording of thermal cameras and audio.\r\n\r\n '''\r\n\r\n self.fs = 192000\r\n one_cycledurn = 1.0/self.sync_freq\r\n num_cycles = 1\r\n sig_durn = num_cycles*one_cycledurn\r\n t = np.linspace(0,sig_durn,int(self.fs*sig_durn))\r\n sine_fn = 2*np.pi*self.sync_freq*t + np.pi\r\n\r\n self.sync_signal = np.float32( signal.square(sine_fn,0.5) )\r\n self.sync_signal *= 0.25\r\n\r\n trigger_freq = 20*10**3\r\n\r\n # conv to 32 bit so sounddevice can take the signals as inputs\r\n self.trigger_signal = np.float32(np.sin(2*np.pi*t*trigger_freq))\r\n self.empty_signal = np.float32(np.zeros(self.sync_signal.size))\r\n\r\n self.only_sync = np.column_stack((self.sync_signal, self.empty_signal,\r\n self.empty_signal))\r\n\r\n self.trig_and_sync = np.column_stack((self.sync_signal, self.trigger_signal,\r\n self.sync_signal))\r\n\r\n\r\n self.S = sd.Stream(samplerate=self.fs,blocksize=self.sync_signal.size,\r\n channels=self.input_output_chs,device=self.tgt_ind,\r\n latency='low')\r\n\r\n start_time = np.copy(self.S.time)\r\n session_time = np.copy(self.S.time)\r\n session_end_time = start_time + self.rec_durn\r\n \r\n\r\n self.q = Queue.Queue()\r\n\r\n self.S.start()\r\n\r\n kb_input = Listener(on_press=self.on_press)\r\n\r\n kb_input.start()\r\n print('Trying to initiate recordings...')\r\n try:\r\n\r\n while session_time < session_end_time:\r\n if self.start_recording:\r\n audiofilename = self.make_filename()\r\n \r\n \r\n now = time.time() \r\n recording_endtime = now + self.one_recording_duration + float(np.random.choice(self.one_recording_pm,1))\r\n print('Approx. end time of recording:', time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(recording_endtime)))\r\n with sf.SoundFile(audiofilename, mode='x', samplerate=self.fs,\r\n channels=len(self.save_channels)) as file:\r\n while time.time() < recording_endtime:\r\n \r\n data, success = self.S.read(self.trig_and_sync.shape[0])\r\n file.write(data[:,self.save_channels])\r\n self.S.write(self.trig_and_sync)\r\n self.start_recording = False\r\n print('Recording done- press any key to trigger the next recording... 
\\n'+'The saved filename is: ' + audiofilename)\r\n self.increment_filecounter()\r\n\r\n else:\r\n self.S.write(self.only_sync)\r\n\r\n session_time = self.S.time\r\n\r\n kb_input.stop()\r\n\r\n except (KeyboardInterrupt, SystemExit):\r\n\r\n print('Stopping recording ..exiting ')\r\n\r\n kb_input.stop()\r\n\r\n\r\n self.S.stop()\r\n\r\n print('Queue size is',self.q.qsize())\r\n\r\n return self.fs, getattr(self, 'rec', None) # self.rec only exists once save_qcontents_aswav() has run\r\n\r\n def make_filename(self):\r\n '''\r\n Creates a file name that begins with MULTIWAV_YYYY-MM-DD_hh-mm-ss_UNIQUENUMBER\r\n '''\r\n timenow = dt.datetime.now()\r\n self.timestamp = timenow.strftime('%Y-%m-%d_%H-%M-%S')\r\n self.idnumber = int(time.mktime(timenow.timetuple())) #the unix time which provides a 10 digit unique identifier\r\n\r\n prefix_filename = 'MULTIWAV_' + self.timestamp+'_'+str(self.idnumber) \r\n # read the current unique recording number from the counter file\r\n \r\n self.unique_number = int(pd.read_csv(self.counter_file)['recording_number'])\r\n final_filename = prefix_filename+'_'+str(self.unique_number)+'.wav'\r\n return final_filename\r\n \r\n def increment_filecounter(self):\r\n '''\r\n Increments filenumber count and saves the data into the counter file. \r\n '''\r\n new_df = pd.DataFrame(data={'recording_number':[self.unique_number+1]})\r\n new_df.to_csv(self.counter_file)\r\n\r\n def on_press(self,key):\r\n\r\n # if a recording has already been initiated then don't do anything\r\n if self.start_recording:\r\n print('Recording underway - wait till current recording is done!!')\r\n pass\r\n else:\r\n print('button pressed....\\\\n')\r\n self.start_recording = True\r\n print('recording started.....')\r\n\r\n\r\n def empty_qcontentsintolist(self):\r\n try:\r\n self.q_contents = [ self.q.get()[0] for i in range(self.q.qsize()) ]\r\n\r\n except:\r\n raise IOError('Unable to empty queue object contents')\r\n\r\n pass\r\n\r\n def save_qcontents_aswav(self):\r\n\r\n print('Saving file now...')\r\n\r\n self.rec = np.concatenate(self.q_contents)\r\n\r\n self.rec2besaved = self.rec[:,self.save_channels]\r\n\r\n \r\n\r\n try:\r\n print('trying to save file... 
')\r\n\r\n # generate a fresh timestamped name; the soundfile module was imported as sf\r\n main_filename = self.make_filename()\r\n sf.write(main_filename,self.rec2besaved,self.fs)\r\n\r\n print('File saved')\r\n\r\n pass\r\n\r\n except:\r\n raise IOError('Could not save file !!')\r\n\r\n\r\n pass\r\n\r\n\r\n\r\n def get_device_indexnumber(self,device_name):\r\n '''\r\n Check for the device name in all of the recognised devices and\r\n return the index number within the list.\r\n\r\n '''\r\n self.device_list = sd.query_devices()\r\n\r\n self.tgt_dev_name = device_name\r\n self.tgt_dev_bool = [self.tgt_dev_name in each_device['name'] for each_device in self.device_list]\r\n\r\n if True not in self.tgt_dev_bool:\r\n\r\n print(sd.query_devices())\r\n\r\n raise ValueError('The input device \\n' + self.tgt_dev_name+\r\n '\\n could not be found, please look at the list above'+\r\n ' for all the recognised devices'+\r\n ' \\n Please use sd.query_devices to check the recognised'\r\n +' devices on this computer')\r\n\r\n if sum(self.tgt_dev_bool) > 1:\r\n raise ValueError('Multiple devices with the same string found'\r\n + ' please enter a more specific device name'\r\n ' \\n Please use sd.query_devices to check the recognised'+\r\n ' devices on this computer')\r\n\r\n else:\r\n self.tgt_ind = int(np.argmax(np.array(self.tgt_dev_bool)))\r\n\r\n\r\n \r\n\r\nif __name__ == '__main__':\r\n print('Starting recording session.....................')\r\n\r\n dev_name = 'ASIO'\r\n in_out_channels = (32,3)\r\n tgt_directory = 'C:\\\\Users\\\\batmobil\\\\Documents\\\\phyllo_expts_july2020\\\\'\r\n\r\n channels_to_exclude = [12,13,14,15, 28,29,30,31]\r\n a = fieldrecorder_phyllo(9000, input_output_chs= in_out_channels, device_name= dev_name,\r\n target_dir= tgt_directory, exclude_channels=channels_to_exclude,one_recording_duration=3,\r\n one_recording_pm =np.arange(0,0.5,0.05),\r\n counter_file='~\\\\Desktop\\\\recording_counter.csv')\r\n fs,rec= a.thermoacousticpy()\r\n\r\n", "sub_path": "fieldrecorder_phyllo.py", "file_name": "fieldrecorder_phyllo.py", "file_ext": "py", "file_size_in_byte": 10550, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 32, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 106, "usage_type": "call"}, {"api_name": "scipy.signal.square", "line_number": 106, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 106, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 118, "usage_type": "call"}, {"api_name": "sounddevice.Stream", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 127, "usage_type": "call"}, {"api_name": 
"Queue.Queue", "line_number": 131, "usage_type": "call"}, {"api_name": "pynput.keyboard.Listener", "line_number": 135, "usage_type": "call"}, {"api_name": "time.time", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 147, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 148, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 148, "usage_type": "call"}, {"api_name": "soundfile.SoundFile", "line_number": 149, "usage_type": "call"}, {"api_name": "time.time", "line_number": 151, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 184, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 184, "usage_type": "attribute"}, {"api_name": "time.mktime", "line_number": 186, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 191, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 227, "usage_type": "call"}, {"api_name": "soundfile.write", "line_number": 236, "usage_type": "call"}, {"api_name": "sounddevice.query_devices", "line_number": 256, "usage_type": "call"}, {"api_name": "sounddevice.query_devices", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 293, "usage_type": "call"}]} +{"seq_id": "253202035", "text": "import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nimport numpy as np\n\nfrom scipy import integrate\n\nsns.set_theme()\n\ncolnames=[ 'X', 'Y', 'Z', \"ACCURACY\", \"TIMESTAMP\"]\nacce1 = pd.read_csv(\"./data/acce1.csv\", names=colnames)\nacce1[\"TIME\"] = (acce1[\"TIMESTAMP\"] - acce1[\"TIMESTAMP\"].iloc[0])/1000000000\n# print(acce1.head())\nplt.figure()\nsns.lineplot(\n data=acce1,\n x=\"TIME\", y=\"X\", label=\"X\")\nsns.lineplot(\n data=acce1,\n x=\"TIME\", y=\"Y\" ,label=\"Y\")\nsns.lineplot(\n data=acce1,\n x=\"TIME\", y=\"Z\",label=\"Z\")\n\nacce1[\"SAMPLING_TIME\"] = acce1[\"TIME\"].diff()\n# print(acce1.head())\n\nsampling_freq = 1 / acce1[\"SAMPLING_TIME\"].mean()\n\norder=5\n\ncutoff_freq = sampling_freq / 1400\n\n# number_of_samples = len(acce1)\n\n# time = np.linspace(0, acce1[\"SAMPLING_TIME\"].mean(), number_of_samples, endpoint=False)\n\nnormalized_cutoff_freq = 2 * cutoff_freq / sampling_freq\n#prepare filter\nnumerator_coeffs, denominator_coeffs = signal.butter(order, normalized_cutoff_freq)\n\nfiltered_signal_X = signal.lfilter(numerator_coeffs, denominator_coeffs, acce1[\"X\"])\nfiltered_signal_Y = signal.lfilter(numerator_coeffs, denominator_coeffs, acce1[\"Y\"])\nfiltered_signal_Z = signal.lfilter(numerator_coeffs, denominator_coeffs, acce1[\"Z\"])\n\nacce1[\"X_filter\"] = filtered_signal_X\nacce1[\"Y_filter\"] = filtered_signal_Y\nacce1[\"Z_filter\"] = filtered_signal_Z\n\nacce1[\"X_filter_abs\"] = acce1[\"X_filter\"].abs()\nacce1[\"Y_filter_abs\"] = acce1[\"Y_filter\"].abs()\nacce1[\"Z_filter_abs\"] = acce1[\"Z_filter\"].abs()\n\nhigh_numerator_coeffs, high_denominator_coeffs = signal.butter(order, normalized_cutoff_freq,btype=\"highpass\")\n\n# acce1[\"X_filter_abs\"] = signal.lfilter(high_numerator_coeffs, high_denominator_coeffs, acce1[\"X_filter_abs\"])\n# acce1[\"Y_filter_abs\"] = signal.lfilter(high_numerator_coeffs, 
high_denominator_coeffs, acce1[\"Y_filter_abs\"])\n# acce1[\"Z_filter_abs\"] = signal.lfilter(high_numerator_coeffs, high_denominator_coeffs, acce1[\"Z_filter_abs\"])\n\n\nacce1[\"X_velocity\"] = integrate.cumtrapz(acce1[\"X_filter\"], x=acce1[\"TIME\"], initial=0)\nacce1[\"Y_velocity\"] = integrate.cumtrapz(acce1[\"Y_filter\"], x=acce1[\"TIME\"], initial=0)\nacce1[\"Z_velocity\"] = integrate.cumtrapz(acce1[\"Z_filter\"], x=acce1[\"TIME\"], initial=0)\n\nacce1.loc[acce1[\"X_filter_abs\"] < 0.0015, 'X_velocity'] = 0\nacce1.loc[acce1[\"Y_filter_abs\"] < 0.0015, 'Y_velocity'] = 0\nacce1.loc[acce1[\"Z_filter_abs\"] < 0.0015, 'Z_velocity'] = 0\n\nlp_X = []\nfor index, value in acce1[\"X_velocity\"].iteritems():\n if value != 0:\n lp_X.append(index)\n\n# print(lp_X)\nlpn_X = []\nlkn_X = []\nfor index, value in enumerate(lp_X):\n if value != lp_X[-1]:\n if value+1 != lp_X[index+1]:\n lkn_X.append(value)\n if value-1 != lp_X[index-1]:\n lpn_X.append(value)\n else:\n lkn_X.append(value)\nprint(\"LPN_X: \")\nprint(lpn_X)\nprint(\"LKN_X: \")\nprint(lkn_X)\nfor index, begin in enumerate(lpn_X):\n if index !=0:\n acce1[\"X_velocity\"][lkn_X[index-1]:lpn_X[index]] = acce1[\"X_velocity\"][lkn_X[index-1]]\n\n# print(len(acce1.loc[acce1[\"X_velocity\"] == 0]))\n\n# Y_velocity\noriginal_lpn_Y = [1867, 3548, 4974, 5975, 7072, 7791, 8594, 9727]\noriginal_lkn_Y = [2095, 4140, 5351, 6432, 7306, 8136, 8885, 9957]\nprint(original_lkn_Y)\nprint(original_lpn_Y)\n\n\n\nlp_Y = []\nfor index, value in acce1[\"Y_velocity\"].iteritems():\n if value != 0:\n lp_Y.append(index)\n# print(lp_Y)\nlpn_Y = []\nlkn_Y = []\nfor index, value in enumerate(lp_Y):\n if value != lp_Y[-1]:\n if value+1 != lp_Y[index+1]:\n lkn_Y.append(value)\n if value-1 != lp_Y[index-1]:\n lpn_Y.append(value)\n else:\n lkn_Y.append(value)\n\nprint(\"LPN_Y: \")\nprint(lpn_Y)\nprint(\"LKN_Y: \")\nprint(lkn_Y)\n\n\nfor index, begin in enumerate(lpn_Y):\n if index !=0:\n acce1[\"Y_velocity\"][begin-1:lkn_Y[index]] = acce1[\"Y_velocity\"][begin-1:lkn_Y[index]] + (abs(acce1[\"Y_velocity\"][lpn_Y[index]]) - abs(acce1[\"Y_velocity\"][lkn_Y[index-1]]) )\n acce1[\"Y_velocity\"][lkn_Y[index-1]:lpn_Y[index]] = acce1[\"Y_velocity\"][lkn_Y[index-1]]\n\n\nlp_Z = []\nfor index, value in acce1[\"Z_velocity\"].iteritems():\n if value != 0:\n lp_Z.append(index) \n\n# print(lp_Z)\nlpn_Z = []\nlkn_Z = []\nfor index, value in enumerate(lp_Z):\n if value != lp_Z[-1]:\n if value+1 != lp_Z[index+1]:\n lkn_Z.append(value)\n if value-1 != lp_Z[index-1]:\n lpn_Z.append(value)\n else:\n lkn_Z.append(value)\nprint(\"LPN_Z: \")\nprint(lpn_Z)\nprint(\"LKN_Z: \")\nprint(lkn_Z)\n\nacce1[\"Z_velocity\"][lpn_Z[1]-1:lkn_Z[1]] = acce1[\"Z_velocity\"][lpn_Z[1]-1:lkn_Z[1]] - (acce1[\"Z_velocity\"][lpn_Z[1]] - acce1[\"Z_velocity\"][lkn_Z[0]])\nacce1[\"Z_velocity\"][lpn_Z[2]-1:lkn_Z[2]] = acce1[\"Z_velocity\"][lpn_Z[2]-1:lkn_Z[2]] - (acce1[\"Z_velocity\"][lpn_Z[1]] - acce1[\"Z_velocity\"][lkn_Z[0]])\nfor index, value in acce1[\"Z_velocity\"][lpn_Z[2]-1:lkn_Z[2]].iteritems():\n if value > 0:\n acce1[\"Z_velocity\"][index] = acce1[\"Z_velocity\"][index] - ( acce1[\"Z_velocity\"][lkn_Z[1]] - acce1[\"Z_velocity\"][lpn_Z[2]])\n else:\n acce1[\"Z_velocity\"][index] = acce1[\"Z_velocity\"][index] + ( acce1[\"Z_velocity\"][lkn_Z[1]] - acce1[\"Z_velocity\"][lpn_Z[2]])\n\nacce1[\"Z_velocity\"][lpn_Z[3]-1:lkn_Z[3]] = acce1[\"Z_velocity\"][lpn_Z[3]-1:lkn_Z[3]] + (abs(acce1[\"Z_velocity\"][lpn_Z[3]]) - abs(acce1[\"Z_velocity\"][lkn_Z[2]]))\n\nacce1[\"Z_velocity\"][lpn_Z[4]-1:lkn_Z[4]] = 
acce1[\"Z_velocity\"][lpn_Z[4]-1:lkn_Z[4]] + (abs(acce1[\"Z_velocity\"][lpn_Z[4]]) - abs(acce1[\"Z_velocity\"][lkn_Z[3]]))\n\nacce1[\"Z_velocity\"][lpn_Z[4]-1:lkn_Z[4]] = acce1[\"Z_velocity\"][lpn_Z[4]-1:lkn_Z[4]] + (abs(acce1[\"Z_velocity\"][lpn_Z[4]]) - abs(acce1[\"Z_velocity\"][lkn_Z[3]]))\n\nacce1[\"Z_velocity\"][lpn_Z[5]-1:lkn_Z[5]] = acce1[\"Z_velocity\"][lpn_Z[5]-1:lkn_Z[5]] + (abs(acce1[\"Z_velocity\"][lpn_Z[5]]) - abs(acce1[\"Z_velocity\"][lkn_Z[4]]))\n\nacce1[\"Z_velocity\"][lpn_Z[6]-1:lkn_Z[6]] = acce1[\"Z_velocity\"][lpn_Z[6]-1:lkn_Z[6]] + (abs(acce1[\"Z_velocity\"][lpn_Z[6]]) - abs(acce1[\"Z_velocity\"][lkn_Z[5]]))\nacce1[\"Z_velocity\"][lpn_Z[7]-1:lkn_Z[7]] = acce1[\"Z_velocity\"][lpn_Z[7]-1:lkn_Z[7]] + (abs(acce1[\"Z_velocity\"][lpn_Z[7]]) - abs(acce1[\"Z_velocity\"][lkn_Z[6]]))\n\nfor index, begin in enumerate(lpn_Z):\n if index !=0:\n acce1[\"Z_velocity\"][lkn_Z[index-1]:lpn_Z[index]] = acce1[\"Z_velocity\"][lkn_Z[index-1]]\n\nplt.figure()\nplt.title(\"FILTER ACCELERATION ABSOLUTE VALUES\")\nsns.lineplot(y=acce1[\"X_filter_abs\"], x=acce1[\"TIME\"],label=\"X\")\nsns.lineplot(y=acce1[\"Y_filter_abs\"], x=acce1[\"TIME\"],label=\"Y\")\nsns.lineplot(y=acce1[\"Z_filter_abs\"], x=acce1[\"TIME\"],label=\"Z\")\n\n# print(acce1.head())\n\nplt.figure()\nplt.title(\"FILTERED ACCELERATION X,Y,Z\")\nsns.lineplot(y=filtered_signal_X, x=acce1[\"TIME\"] , label=\"X\")\nsns.lineplot(y=filtered_signal_Y, x=acce1[\"TIME\"], label=\"Y\")\nsns.lineplot(y=filtered_signal_Z, x=acce1[\"TIME\"], label=\"Z\")\n\n\nplt.figure()\nplt.title(\"VELOCITY X,Y,Z\")\nsns.lineplot(y=acce1[\"X_velocity\"], x=acce1[\"TIME\"], label=\"X\")\nsns.lineplot(y=acce1[\"Y_velocity\"], x=acce1[\"TIME\"], label=\"Y\")\nsns.lineplot(y=acce1[\"Z_velocity\"], x=acce1[\"TIME\"], label=\"Z\")\n\n# acce1[\"X_velocity\"] = signal.lfilter(high_numerator_coeffs, high_denominator_coeffs, acce1[\"X_velocity\"])\n# acce1[\"Y_velocity\"] = signal.lfilter(high_numerator_coeffs, high_denominator_coeffs, acce1[\"Y_velocity\"])\n# acce1[\"Z_velocity\"] = signal.lfilter(high_numerator_coeffs, high_denominator_coeffs, acce1[\"Z_velocity\"])\n\n\nacce1[\"X_POSITION\"] = integrate.cumtrapz(acce1[\"X_velocity\"],x=acce1[\"TIME\"], initial=0)\nacce1[\"Y_POSITION\"]= integrate.cumtrapz(acce1[\"Y_velocity\"],x=acce1[\"TIME\"], initial=0)\nacce1[\"Z_POSITION\"]= integrate.cumtrapz(acce1[\"Z_velocity\"],x=acce1[\"TIME\"],initial=0)\n\nplt.figure()\nplt.title(\"POSITION X,Y,Z\")\nsns.lineplot(y=acce1[\"X_POSITION\"], x=acce1[\"TIME\"], label=\"X\")\nsns.lineplot(y=acce1[\"Y_POSITION\"], x=acce1[\"TIME\"], label=\"Y\")\nsns.lineplot(y=acce1[\"Z_POSITION\"], x=acce1[\"TIME\"],label=\"Z\")\n\nplt.show()\n", "sub_path": "SensorsData/sensors_old.py", "file_name": "sensors_old.py", "file_ext": "py", "file_size_in_byte": 7881, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "seaborn.set_theme", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 16, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 19, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.signal.butter", "line_number": 41, "usage_type": "call"}, {"api_name": 
"scipy.signal", "line_number": 41, "usage_type": "name"}, {"api_name": "scipy.signal.lfilter", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 43, "usage_type": "name"}, {"api_name": "scipy.signal.lfilter", "line_number": 44, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 44, "usage_type": "name"}, {"api_name": "scipy.signal.lfilter", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 45, "usage_type": "name"}, {"api_name": "scipy.signal.butter", "line_number": 55, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 55, "usage_type": "name"}, {"api_name": "scipy.integrate.cumtrapz", "line_number": 62, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 62, "usage_type": "name"}, {"api_name": "scipy.integrate.cumtrapz", "line_number": 63, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 63, "usage_type": "name"}, {"api_name": "scipy.integrate.cumtrapz", "line_number": 64, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 178, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 179, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 186, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 187, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 193, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 194, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 195, "usage_type": "call"}, {"api_name": "scipy.integrate.cumtrapz", "line_number": 202, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 202, "usage_type": "name"}, {"api_name": "scipy.integrate.cumtrapz", "line_number": 203, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 203, "usage_type": "name"}, {"api_name": "scipy.integrate.cumtrapz", "line_number": 204, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, 
"usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 208, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 209, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}]} +{"seq_id": "157199022", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom sklearn.model_selection import train_test_split, KFold, GridSearchCV\nfrom sklearn.linear_model import LinearRegression, Lasso, ElasticNet, Ridge\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\n\n\nclass LassoModel:\n def __init__(self, alpha):\n self.model = Lasso(alpha=alpha, selection='random')\n self.score = None\n self.mae = None\n self.mse = None\n\n def train(self, x, y, num_folds):\n x = np.array(x)\n y = np.array(y)\n folder = KFold(n_splits=num_folds, random_state=2, shuffle=True)\n for i, (train_idx, test_idx) in enumerate(folder.split(x)):\n x_train, y_train = x[train_idx, :], y[train_idx]\n x_test, y_test = x[test_idx, :], y[test_idx]\n self.model.fit(x_train, y_train)\n self.score = self.model.score(x_train, y_train)\n y_pre = self.model.predict(x_test)\n mae = mean_absolute_error(y_pre, y_test)\n mse = mean_squared_error(y_pre, y_test)\n print(\"Lasso's {} fold Coefficients: {}\".format(i, self.model.coef_))\n print(\"Lasso's {} fold Score: {}\".format(i, self.score))\n print(\"Lasso's {} fold mean absolute error: {}\".format(i, mae))\n print(\"Lasso's {} fold mean squared error: {}\".format(i, mse))\n\n def test(self, x, y):\n y_pre = self.model.predict(x)\n self.mae = mean_absolute_error(y_pre, y)\n self.mse = mean_squared_error(y_pre, y)\n print(\"Lasso的预测mae是{}, mse是{}\".format(self.mae, self.mse))\n\n @staticmethod\n def test_lasso_alpha(x, y, alphas=[0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50]):\n x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.9, random_state=2)\n scores = []\n for i, alpha in enumerate(alphas):\n model = Lasso(alpha=alpha)\n model.fit(x_train, y_train)\n scores.append(model.score(x_test, y_test))\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(alphas, scores)\n ax.set_xlabel(r\"$\\alpha$\")\n ax.set_ylabel(r\"score\")\n ax.set_xscale('log')\n ax.set_title('Lasso')\n plt.show()\n\n\nclass LinearModel:\n def __init__(self):\n self.model = LinearRegression(fit_intercept=False)\n self.score = None\n self.mae = None\n self.mse = None\n\n def train(self, x, y):\n self.model.fit(x, y)\n self.score = self.model.score(x, y)\n print('Linear Coefficients: {}'.format(self.model.coef_))\n print('Linear Score: {}'.format(self.score))\n\n def test(self, x, y):\n y_pre = self.model.predict(x)\n self.mae = mean_absolute_error(y_pre, y)\n self.mse = mean_squared_error(y_pre, y)\n print(\"Linear的预测mae是{}, mse是{}\".format(self.mae, self.mse))\n\n\nclass ElasticNetModel:\n def __init__(self):\n self.model = ElasticNet(selection='random')\n self.score = None\n self.mae = None\n self.mse = None\n\n def train(self, x, y):\n self.model.fit(x, y)\n self.score = self.model.score(x, y)\n print('ElasticNet Coefficients: {}'.format(self.model.coef_))\n print('ElasticNet Score: {}'.format(self.score))\n\n def test(self, x, y):\n y_pre = self.model.predict(x)\n self.mae = mean_absolute_error(y_pre, y)\n self.mse = mean_squared_error(y_pre, y)\n print(\"ElasticNet的预测mae是{}, 
mse是{}\".format(self.mae, self.mse))\n\n @staticmethod\n def grid_search_alpha_rho(x, y):\n \"\"\"\n 通过GridSearch搜索最优模型参数\n Args:\n x: 训练集自变量\n y: 训练集因变量\n return: 最优参数组合\n \"\"\"\n els_parameters = {\n 'alpha': np.logspace(-2, 2),\n 'l1_ratio': np.linspace(0.01, 1)\n }\n eln = ElasticNet()\n eln_gs = GridSearchCV(estimator=eln, param_grid=els_parameters, cv=5)\n eln_gs.fit(x, y)\n print('Elastic 最大回归系数为 {}'.format(eln_gs.best_score_))\n return eln_gs.best_params_\n\n @staticmethod\n def test_elastic_net_alpha_rho(x, y):\n x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.9, random_state=2)\n alphas = np.logspace(-2, 2)\n rhos = np.linspace(0.01, 1)\n scores = []\n for alpha in alphas:\n for rho in rhos:\n model = ElasticNet(alpha=alpha, l1_ratio=rho)\n model.fit(x_train, y_train)\n scores.append(model.score(x_test, y_test))\n # 绘制三维图\n alphas, rhos = np.meshgrid(alphas, rhos)\n scores = np.array(scores).reshape(alphas.shape)\n fig = plt.figure()\n ax = Axes3D(fig)\n surf = ax.plot_surface(alphas, rhos, scores, rstride=1, cstride=1)\n fig.colorbar(surf, shrink=0.5, aspect=5)\n ax.set_xlabel(r\"$\\alpha$\")\n ax.set_ylabel(r\"$\\rho$\")\n ax.set_zlabel('score')\n ax.set_title('ElasticNet')\n plt.show()\n\n\nclass RidgeModel:\n def __init__(self):\n self.model = Ridge()\n self.score = None\n\n def test_Ridge_alpha(self, x, y):\n alphas = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5]\n scores = []\n for i, alpha in enumerate(alphas):\n reg_r = Ridge(alpha=alpha)\n reg_r.fit(x, y)\n scores.append(reg_r.score(x, y))\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(alphas, scores)\n ax.set_xlabel(r\"$ \\alpha $\")\n ax.set_ylabel(r\"score\")\n ax.set_xscale('log')\n ax.set_title(\"Ridge\")\n plt.show()\n\n def train(self, x, y):\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n self.model.fit(x_train, y_train)\n print(\"Coefficients : {}, intercept : {}\".format(self.model.coef_, self.model.intercept_))\n print(\"Residual sum of squares: {}\".format(np.mean(self.model.predict(x_test)-y_test)**2))\n print(\"Score: {}\".format(self.model.score(x_test, y_test)))\n\n\nif __name__ == '__main__':\n line_model = LassoModel(alpha=0.01)\n import numpy as np\n import pandas as pd\n\n data = pd.read_excel(\"/Users/ashzerm/item/GasOline/data/stand_oline.xlsx\")\n target = np.array(data['RON_LOSS'].copy())\n value = data[data.columns[10:]].copy()\n print(value.columns)\n value = np.array(value)\n # line_model.test_lasso_alpha(value, target)\n # model.train(data, target)\n # model.test(data, target)\n # els = ElasticNetModel(0.01)\n # els.test_elastic_net_alpha_rho(data, target)\n regr = RidgeModel()\n regr.test_Ridge_alpha(value, target)\n", "sub_path": "model/linear.py", "file_name": "linear.py", "file_ext": "py", "file_size_in_byte": 6758, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sklearn.linear_model.Lasso", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 36, "usage_type": "call"}, {"api_name": 
"sklearn.metrics.mean_squared_error", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.linear_model.ElasticNet", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 108, "usage_type": "call"}, {"api_name": "sklearn.linear_model.ElasticNet", "line_number": 110, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 111, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.linear_model.ElasticNet", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "mpl_toolkits.mplot3d.Axes3D", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 143, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 166, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 179, "usage_type": "call"}]} +{"seq_id": "509380876", "text": "# Copyright 2020-2021\n#\n# This file is part of HiPACE++.\n#\n# Authors: AlexanderSinn, Severin Diederichs\n# License: BSD-3-Clause-LBNL\n\nimport math\nimport openpmd_api as io\nimport numpy as np\nfrom numpy import random\nfrom scipy import constants\n\nn = 
1000000\nbeam_density = 3.\nplasma_density = 2.8239587008591567e23\nbeam_position_mean = [0, 0, 0]\nbeam_position_std = [0.3, 0.3, 1.41]\nbeam_u_mean = [0, 0, 2000]\nbeam_u_std = [0, 0, 0]\n\nkp_inv = constants.c / constants.e * math.sqrt(constants.epsilon_0 * constants.m_e / plasma_density)\n\nsingle_weight = (beam_density * beam_position_std[0] * beam_position_std[1] *\n beam_position_std[2] * np.sqrt(2. * math.pi)**3 / n)\n\nrng = random.default_rng(seed=0)\ndata = np.zeros([6,n],dtype=np.float64)\n\nfor i in [0,1,2]:\n data[i]=rng.normal(beam_position_mean[i],beam_position_std[i],n)\n data[i+3]=rng.normal(beam_u_mean[i],beam_u_std[i],n)\n\nseries = io.Series(\"beam_%05T.h5\", io.Access.create)\n\ni = series.iterations[0]\n\nparticle = i.particles[\"Electrons\"]\n\nparticle.set_attribute(\"HiPACE++_Plasma_Density\", plasma_density)\n\ndataset = io.Dataset(data[0].dtype,data[0].shape)\n\nparticle[\"position\"].unit_dimension = {\n io.Unit_Dimension.L: 1,\n}\n\nparticle[\"momentum\"].unit_dimension = {\n io.Unit_Dimension.M: 1,\n io.Unit_Dimension.L: 1,\n io.Unit_Dimension.T: -1,\n}\n\nparticle[\"charge\"].unit_dimension = {\n io.Unit_Dimension.I: 1,\n io.Unit_Dimension.T: 1,\n}\n\nparticle[\"mass\"].unit_dimension = {\n io.Unit_Dimension.M: 1,\n}\n\nfor k,m in [[\"x\",0],[\"y\",1],[\"z\",2]]:\n particle[\"position\"][k].reset_dataset(dataset)\n particle[\"position\"][k].store_chunk(data[m])\n particle[\"position\"][k].unit_SI = kp_inv\n\nfor k,m in [[\"x\",3],[\"y\",4],[\"z\",5]]:\n particle[\"momentum\"][k].reset_dataset(dataset)\n particle[\"momentum\"][k].store_chunk(data[m])\n particle[\"momentum\"][k].unit_SI = constants.m_e * constants.c\n\nSCALAR = io.Mesh_Record_Component.SCALAR\n\nparticle[\"charge\"][SCALAR].reset_dataset(dataset)\nparticle[\"charge\"][SCALAR].make_constant(single_weight)\nparticle[\"charge\"][SCALAR].unit_SI = constants.e * plasma_density * kp_inv**3\n\nparticle[\"mass\"][SCALAR].reset_dataset(dataset)\nparticle[\"mass\"][SCALAR].make_constant(single_weight)\nparticle[\"mass\"][SCALAR].unit_SI = constants.m_e * plasma_density * kp_inv**3\n\nseries.flush()\n\ndel series\n", "sub_path": "tools/write_beam.py", "file_name": "write_beam.py", "file_ext": "py", "file_size_in_byte": 2330, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "scipy.constants.c", "line_number": 22, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 22, "usage_type": "name"}, {"api_name": "scipy.constants.e", "line_number": 22, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.constants.epsilon_0", "line_number": 22, "usage_type": "attribute"}, {"api_name": "scipy.constants.m_e", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 25, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.random.default_rng", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 28, "usage_type": "attribute"}, {"api_name": "openpmd_api.Series", "line_number": 34, "usage_type": "call"}, {"api_name": "openpmd_api.Access", "line_number": 34, "usage_type": "attribute"}, {"api_name": "openpmd_api.Dataset", "line_number": 42, "usage_type": "call"}, {"api_name": 
"openpmd_api.Unit_Dimension", "line_number": 45, "usage_type": "attribute"}, {"api_name": "openpmd_api.Unit_Dimension", "line_number": 49, "usage_type": "attribute"}, {"api_name": "openpmd_api.Unit_Dimension", "line_number": 50, "usage_type": "attribute"}, {"api_name": "openpmd_api.Unit_Dimension", "line_number": 51, "usage_type": "attribute"}, {"api_name": "openpmd_api.Unit_Dimension", "line_number": 55, "usage_type": "attribute"}, {"api_name": "openpmd_api.Unit_Dimension", "line_number": 56, "usage_type": "attribute"}, {"api_name": "openpmd_api.Unit_Dimension", "line_number": 60, "usage_type": "attribute"}, {"api_name": "scipy.constants.m_e", "line_number": 71, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 71, "usage_type": "name"}, {"api_name": "scipy.constants.c", "line_number": 71, "usage_type": "attribute"}, {"api_name": "openpmd_api.Mesh_Record_Component", "line_number": 73, "usage_type": "attribute"}, {"api_name": "scipy.constants.e", "line_number": 77, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 77, "usage_type": "name"}, {"api_name": "scipy.constants.m_e", "line_number": 81, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "534588118", "text": "from game2048.game import Game\nfrom game2048.displays import Display\nimport matplotlib.pyplot as plt\nimport sys\n\n\ndef single_run(size, score_to_win, AgentClass, **kwargs):\n game = Game(size, score_to_win)\n agent = AgentClass(game, display=Display(), **kwargs)\n agent.play(verbose=True)\n return game.score\n\n\nif __name__ == '__main__':\n GAME_SIZE = 4\n SCORE_TO_WIN = 2048\n N_TESTS = 50\n\n if len(sys.argv) == 2:\n agent_name = sys.argv[1].split(\"=\")[-1]\n if agent_name == \"emagent\":\n from game2048.agents import ExpectiMaxAgent as TestAgent\n elif agent_name == \"pagent\":\n from task.agents import PlanningAgent as TestAgent\n elif agent_name == \"cnnagent\":\n from task.agents import CNNAgent as TestAgent\n else:\n print(\"WARNING: Agent class doesn't exist.\")\n else:\n # default\n from task.agents import CNNAgent as TestAgent\n\n '''====================\n Use ExpectiMaxAgent here.'''\n # from game2048.agents import ExpectiMaxAgent as TestAgent\n '''====================\n Use PlanningAgent here.'''\n # from task.agents import PlanningAgent as TestAgent\n '''====================\n Use CNNAgent here.'''\n # from task.agents import CNNAgent as TestAgent\n '''===================='''\n\n scores = []\n for i in range(N_TESTS):\n print(\"N_TESTS for :%d\" % i)\n score = single_run(GAME_SIZE, SCORE_TO_WIN,\n AgentClass=TestAgent)\n scores.append(score)\n\n # plt.plot(scores)\n # plt.xlabel(\"Loops\")\n # plt.ylabel(\"Score\")\n # plt.yticks([2**i for i in range(5,11):])\n # plt.title(\"Score Distribution Over %d Tests\" % N_TESTS)\n # plt.show()\n\n print(\"Average scores: @%s times\" % N_TESTS, sum(scores) / len(scores))\n", "sub_path": "evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 1806, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "game2048.game.Game", "line_number": 8, "usage_type": "call"}, {"api_name": "game2048.displays.Display", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "task.agents.CNNAgent", "line_number": 48, "usage_type": "name"}]} 
+{"seq_id": "456507388", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom util import sample_and_group \n\nclass Local_op(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(Local_op, self).__init__()\n self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=False)\n self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm1d(out_channels)\n self.bn2 = nn.BatchNorm1d(out_channels)\n\n def forward(self, x):\n b, n, s, d = x.size() # torch.Size([32, 512, 32, 6]) \n x = x.permute(0, 1, 3, 2) \n x = x.reshape(-1, d, s) \n batch_size, _, N = x.size()\n x = F.relu(self.bn1(self.conv1(x))) # B, D, N\n x = F.relu(self.bn2(self.conv2(x))) # B, D, N\n x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)\n x = x.reshape(b, n, -1).permute(0, 2, 1)\n return x\n\nclass Pct(nn.Module):\n def __init__(self, args, output_channels=40):\n super(Pct, self).__init__()\n self.args = args\n self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)\n self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm1d(64)\n self.bn2 = nn.BatchNorm1d(64)\n self.gather_local_0 = Local_op(in_channels=128, out_channels=128)\n self.gather_local_1 = Local_op(in_channels=256, out_channels=256)\n\n self.pt_last = Point_Transformer_Last(args)\n\n self.conv_fuse = nn.Sequential(nn.Conv1d(1280, 1024, kernel_size=1, bias=False),\n nn.BatchNorm1d(1024),\n nn.LeakyReLU(negative_slope=0.2))\n\n\n self.linear1 = nn.Linear(1024, 512, bias=False)\n self.bn6 = nn.BatchNorm1d(512)\n self.dp1 = nn.Dropout(p=args.dropout)\n self.linear2 = nn.Linear(512, 256)\n self.bn7 = nn.BatchNorm1d(256)\n self.dp2 = nn.Dropout(p=args.dropout)\n self.linear3 = nn.Linear(256, output_channels)\n\n def forward(self, x):\n # x.shape = batch_size x 3 x 1024\n xyz = x.permute(0, 2, 1)\n batch_size, _, _ = x.size()\n # B, D, N\n x = F.relu(self.bn1(self.conv1(x)))\n # B, D, N\n x = F.relu(self.bn2(self.conv2(x)))\n x = x.permute(0, 2, 1)\n\n\n new_xyz, new_feature = sample_and_group(npoint=512, radius=0.15, nsample=32, xyz=xyz,\n points=x)\n feature_0 = self.gather_local_0(new_feature)\n feature = feature_0.permute(0, 2, 1)\n new_xyz, new_feature = sample_and_group(npoint=256, radius=0.2, nsample=32, xyz=new_xyz,\n points=feature)\n feature_1 = self.gather_local_1(new_feature)\n\n\n x = self.pt_last(feature_1, new_xyz)\n x = torch.cat([x, feature_1], dim=1)\n x = self.conv_fuse(x)\n x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)\n x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)\n x = self.dp1(x)\n x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)\n x = self.dp2(x)\n x = self.linear3(x)\n\n return x\n\nclass Point_Transformer_Last(nn.Module):\n def __init__(self, args, channels=256):\n super(Point_Transformer_Last, self).__init__()\n self.args = args\n self.conv1 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)\n self.conv2 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)\n self.conv3 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)\n self.conv4 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)\n\n self.conv5 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)\n self.conv6 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)\n\n self.bn1 = nn.BatchNorm1d(channels)\n self.bn2 = nn.BatchNorm1d(channels)\n self.bn3 = nn.BatchNorm1d(channels)\n self.bn4 = nn.BatchNorm1d(channels)\n\n self.bn5 = nn.BatchNorm1d(channels)\n self.bn6 = 
nn.BatchNorm1d(channels)\n\n self.use_tmd = args.use_tmd\n\n if not self.use_tmd:\n self.sa1 = SA_Layer(channels)\n self.sa2 = SA_Layer(channels)\n self.sa3 = SA_Layer(channels)\n self.sa4 = SA_Layer(channels)\n\n else: # use TMDlayers to replace SA layers\n L_latent = 16 # this latent dimension can be changed according to different tasks/datasets using validation set\n self.pi_list = nn.ModuleList([nn.Sequential(nn.Linear(L_latent, channels),\n nn.ReLU(),\n nn.Linear(channels, 1),\n nn.Sigmoid()) for _ in range(7)])\n self.proj_list = nn.ModuleList([nn.Linear(channels, L_latent) for _ in range(7)])\n self.conv_fuse_list = nn.ModuleList([nn.Sequential(nn.Conv1d(channels*2, channels, kernel_size=1, bias=False),\n nn.BatchNorm1d(channels),\n nn.LeakyReLU(negative_slope=0.2)) for _ in range(7)])\n\n self.dt_1 = nn.Parameter(torch.FloatTensor([0.1]))\n self.dt_2 = nn.Parameter(torch.FloatTensor([0.1]))\n self.dt_3 = nn.Parameter(torch.FloatTensor([0.1]))\n self.dt_4 = nn.Parameter(torch.FloatTensor([0.1]))\n self.dt_5 = nn.Parameter(torch.FloatTensor([0.1]))\n self.dt_6 = nn.Parameter(torch.FloatTensor([0.1]))\n\n def TMD_map(self, x, idx):\n # input x if of size [B, d, N]\n x = x.permute(0, 2, 1)\n x = self.proj_list[idx](x)\n # L = construct from pe\n epsilon = 0.25\n i_minus_j = x.unsqueeze(2) - x.unsqueeze(1)\n K_epsilon = torch.exp(-1 / (4 * epsilon) * (i_minus_j ** 2).sum(dim=3))\n ### construct TMD\n q_epsilon_tilde = K_epsilon.sum(dim=2)\n D_epsilon_tilde = torch.diag_embed(self.pi_list[idx](x).squeeze(2) / q_epsilon_tilde)\n K_tilde = K_epsilon.bmm(D_epsilon_tilde)\n D_tilde = torch.diag_embed(K_tilde.sum(dim=2) +\n 1e-5 * torch.ones(K_tilde.shape[0], K_tilde.shape[1]).to(x.device))\n L = 1 / epsilon * (torch.inverse(D_tilde).bmm(K_tilde)) - torch.eye(K_tilde.shape[1]).to(\n x.device).unsqueeze(0).repeat(x.shape[0], 1, 1)\n return L\n\n def forward(self, x, xyz):\n # \n # b, 3, npoint, nsample \n # conv2d 3 -> 128 channels 1, 1\n # b * npoint, c, nsample \n # permute reshape\n batch_size, _, N = x.size()\n\n x = F.relu(self.bn5(self.conv5(x)))\n x = F.relu(self.bn6(self.conv6(x)))\n\n if self.use_tmd:\n x_orig = x\n x = F.relu(self.bn1(self.conv1(x)))\n x = x.permute(0, 2, 1)\n L = self.TMD_map(x_orig, idx=5)\n x = (x + self.dt_5 * torch.matmul(L, x)).permute(0, 2, 1)\n x1 = x\n\n x_orig = x\n x = F.relu(self.bn2(self.conv2(x)))\n if self.use_tmd:\n x = x.permute(0, 2, 1)\n L = self.TMD_map(x_orig, idx=6)\n x = (x + self.dt_6 * torch.matmul(L, x)).permute(0, 2, 1)\n x2 = x\n\n x_orig = x\n x = F.relu(self.bn3(self.conv3(x)))\n x = x.permute(0, 2, 1)\n L = self.TMD_map(x_orig, idx=1)\n x = (x + self.dt_1 * torch.matmul(L, x)).permute(0, 2, 1)\n x3 = x\n\n x_orig = x\n x = F.relu(self.bn4(self.conv4(x)))\n x = x.permute(0, 2, 1)\n L = self.TMD_map(x_orig, idx=2)\n x = (x + self.dt_2 * torch.matmul(L, x)).permute(0, 2, 1)\n x4 = x\n\n else:\n ### The self-attention layers in the original PCT paper.\n x1 = self.sa1(x)\n\n x2 = self.sa2(x1)\n\n x3 = self.sa3(x2)\n\n x4 = self.sa4(x3)\n\n x = torch.cat((x1, x2, x3, x4), dim=1)\n\n return x\n\nclass SA_Layer(nn.Module):\n def __init__(self, channels):\n super(SA_Layer, self).__init__()\n self.q_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)\n self.k_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)\n self.q_conv.weight = self.k_conv.weight\n self.q_conv.bias = self.k_conv.bias\n\n self.v_conv = nn.Conv1d(channels, channels, 1)\n self.trans_conv = nn.Conv1d(channels, channels, 1)\n self.after_norm = nn.BatchNorm1d(channels)\n 
self.act = nn.ReLU()\n self.softmax = nn.Softmax(dim=-1)\n\n\n def forward(self, x):\n\n # b, n, c\n x_q = self.q_conv(x).permute(0, 2, 1)\n # b, c, n\n x_k = self.k_conv(x)\n x_v = self.v_conv(x)\n # b, n, n\n energy = torch.bmm(x_q, x_k)\n\n attention = self.softmax(energy)\n attention = attention / (1e-9 + attention.sum(dim=1, keepdim=True))\n # b, c, n\n x_r = torch.bmm(x_v, attention)\n x_r = self.act(self.after_norm(self.trans_conv(x - x_r)))\n x = x + x_r\n return x\n\n\n", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 9089, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.functional.adaptive_max_pool1d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": 
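A usage sketch for the offset-attention block above, assuming SA_Layer from this file is importable. Two quirks carried over from the source are worth noting: q_conv and k_conv share weights, and the softmax output is re-normalized over dim=1 rather than scaled by sqrt(d_k); the residual branch then transforms the offset x - x_r instead of the attention output itself.

```python
import torch

layer = SA_Layer(channels=128)
x = torch.randn(4, 128, 1024)    # [batch, channels, points]
out = layer(x)
assert out.shape == x.shape      # offset-attention is shape-preserving
```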
"name"}, {"api_name": "torch.nn.Linear", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 58, "usage_type": "name"}, {"api_name": "util.sample_and_group", "line_number": 62, "usage_type": "call"}, {"api_name": "util.sample_and_group", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.functional.adaptive_max_pool1d", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.functional.leaky_relu", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.functional.leaky_relu", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 98, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", 
"line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn.Conv1d", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 119, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 127, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.diag_embed", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.diag_embed", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.inverse", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 155, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 156, "usage_type": "call"}, {"api_name": 
"torch.nn.functional", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 167, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 175, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 182, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 198, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 202, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 202, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 205, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 206, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 210, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 210, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 211, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 211, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 212, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 213, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 214, "usage_type": "name"}, {"api_name": "torch.bmm", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 230, "usage_type": "call"}]} +{"seq_id": "646410789", "text": "import h5py\nimport pytz\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import medfilt\n\nFILENAME = \"1541962108935000000_167_838.h5\"\nfile = h5py.File(FILENAME, 'r')\n\nutcTimeStamp = datetime.datetime.utcfromtimestamp(int(FILENAME[0:10]) + float(\".\"+FILENAME[10:19]))\nutcTime = utcTimeStamp # .strftime(\"%Y-%m-%d %H:%M:%S\")\ncernTime = pytz.timezone(\"Europe/Zurich\").fromutc(utcTimeStamp) # .strftime(\"%Y-%m-%d %H:%M:%S\")\n\nprint(\"Task 1:\")\nprint(utcTime)\nprint(cernTime)\nprint(\"\")\n\nprint(\"Task 2:\")\nprint(\"Wait...\")\n\nwith open(\"path.csv\", \"w\") as f:\n f.truncate()\n f.write(\"Path,DataType,Size,Shape\"+\"\\n\")\n\ndef appendToCSV(string, clear):\n with open(\"path.csv\", \"a\") as f:\n f.write(string + \"\\n\")\n\ndef recurExplore(name, Object):\n string = name\n if isinstance(Object, h5py.Dataset):\n try:\n string += Object.dtype.__str__()+\",\" + Object.size.__str__()+\",\" + Object.shape.__str__()\n except TypeError:\n # print(\"Dataset not NumPy compatible.\")\n string += \",Uncompatible DataType,,\"\n else:\n string += \",,,\"\n appendToCSV(string, 
False)\n\nfile.visititems(recurExplore)\nprint(\"Done!\\n\")\n\nprint(\"Task 3:\")\nimage1D = np.array(file[\"/AwakeEventData/XMPP-STREAK/StreakImage/streakImageData\"])\nheight = list(file[\"/AwakeEventData/XMPP-STREAK/StreakImage/streakImageHeight\"])[0]\nwidth = list(file[\"/AwakeEventData/XMPP-STREAK/StreakImage/streakImageWidth\"])[0]\nimage2D = np.reshape(image1D, (height, width))\nfinalImage = medfilt(image2D)\n\nplt.imshow(finalImage)\nplt.savefig(\"image.png\")\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1596, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "h5py.File", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 13, "usage_type": "call"}, {"api_name": "h5py.Dataset", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.signal.medfilt", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "195086438", "text": "import os\r\nimport mysql.connector as mysql\r\nimport requests\r\nfrom urllib.request import urlopen\r\n\r\ndef obtenerHTML(host):\r\n url ='http://' + host +'/php_tiendita_login/index.html'\r\n print(url)\r\n peticion = requests.get(url)\r\n print(\"\\nPetición HTTP\")\r\n print(\"\\nSitio: \"+ str(peticion.url))\r\n print(\"Codigo de operación: \"+ str(peticion.status_code))\r\n print(\"Codigo del sitio: \\n\" + str(peticion.text))\r\n if(str(peticion.status_code)==200):\r\n hosts.append(host)\r\n f = open (host + '.txt', 'w')\r\n f.write(str(peticion.text))\r\n f.close()\r\n\r\nhosts = []\r\nconexion = mysql.connect( host = 'localhost', user = 'root', passwd = '', db = 'nmap' )\r\noperacion = conexion.cursor()\r\noperacion.execute( \"SELECT direccion FROM puerto WHERE servicio = 'http';\")\r\nfor ip in operacion.fetchall() :\r\n print (ip)\r\n host = str(ip[0])\r\n try:\r\n obtenerHTML(host)\r\n except:\r\n print(\"No fue posible realizar la petición a: \" + host)\r\nconexion.close()", "sub_path": "parte_1.py", "file_name": "parte_1.py", "file_ext": "py", "file_size_in_byte": 996, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "mysql.connector.connect", "line_number": 21, "usage_type": "call"}, {"api_name": "mysql.connector", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "165835883", "text": "import struct\r\nimport json\r\nimport logging\r\nimport traceback\r\n\r\n\r\ndef pack(cmd = 0x000000, **options):\r\n \r\n size = 0\r\n package = None\r\n\r\n if cmd == HeartingPackage.m_cmd:\r\n\r\n size = struct.calcsize(HeartingPackage.fmt)\r\n package = struct.pack(HeartingPackage.fmt, cmd, size)\r\n\r\n elif cmd == LoginPackage.m_cmd:\r\n\r\n serverid = options.get('serverid', '')\r\n sig = options.get('sig', '')\r\n\r\n size 
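The streak-image handling above relies on the dataset being stored as a flat 1-D array with separate height/width scalars (streakImageData, streakImageHeight, streakImageWidth), so it must be reshaped to 2-D before filtering. A minimal sketch of that step with random data standing in for the HDF5 datasets:

```python
import numpy as np
from scipy.signal import medfilt

height, width = 512, 672                 # illustrative; read from the file in practice
image1d = np.random.randint(0, 4096, size=height * width).astype(float)
image2d = np.reshape(image1d, (height, width))
denoised = medfilt(image2d)              # default 3x3 median kernel on 2-D input
assert denoised.shape == (height, width)
```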
= struct.calcsize(LoginPackage.fmt)\r\n package = struct.pack(LoginPackage.fmt, cmd, size, serverid.encode('utf-8'), sig.encode('utf-8'))\r\n\r\n elif cmd == LoginResponsePackage.m_cmd:\r\n\r\n code = options.get('code', 0x000000)\r\n\r\n size = struct.calcsize(LoginResponsePackage.fmt)\r\n package = struct.pack(LoginResponsePackage.fmt, cmd, size, code)\r\n\r\n elif cmd == ChatFilterPackage.m_cmd:\r\n\r\n msgid = options.get('msgid', 0x000000)\r\n msgtxt = options.get('msgtxt', '')\r\n msg_bytes = bytes(msgtxt, 'utf-8')\r\n\r\n msgsize = len(msg_bytes)\r\n \r\n size = struct.calcsize(ChatFilterPackage.fmt) + msgsize\r\n\r\n package = struct.pack(ChatFilterPackage.fmt, cmd, size, msgid, msgsize) + msg_bytes\r\n # package = struct.pack('!4i100s', cmd, size, msgid, msgsize, msgtxt.encode('utf-8'))\r\n\r\n elif cmd == ChatWithJSONPackage.m_cmd:\r\n\r\n json_data = options.get('json', {})\r\n msgid = options.get('msgid', 0x000000)\r\n msgtxt = json_data.get('msg', '')\r\n roomid = json_data.get('roomid', 'none')\r\n loginname = json_data.get('loginname', '')\r\n\r\n json_byte = bytes(json.dumps({'msg': msgtxt, 'roomid': roomid, 'loginname': loginname}), 'utf-8')\r\n jsonsize = len(json_byte)\r\n\r\n size = struct.calcsize(ChatWithJSONPackage.fmt) + jsonsize\r\n package = struct.pack(ChatWithJSONPackage.fmt, cmd, size, msgid, jsonsize) + json_byte\r\n\r\n elif cmd == ChatFilterResponsePackage.m_cmd:\r\n\r\n msgid = options.get('msgid', 0x000000)\r\n code = options.get('code', 0x000000)\r\n\r\n size = struct.calcsize(ChatFilterResponsePackage.fmt)\r\n package = struct.pack(ChatFilterResponsePackage.fmt, cmd, size, msgid, code)\r\n\r\n elif cmd == NickNameFilterRequestPackage.m_cmd:\r\n\r\n reqid = options.get('reqid', 0x000000)\r\n nickname = options.get('nickname', '')\r\n byte_nickname = bytes(nickname, 'utf-8')\r\n\r\n size = struct.calcsize(NickNameFilterRequestPackage.fmt) + len(byte_nickname)\r\n\r\n package = struct.pack(NickNameFilterRequestPackage.fmt, cmd, size, reqid) + byte_nickname\r\n\r\n elif cmd == NickNameFilterResponsePackage.m_cmd:\r\n\r\n reqid = options.get('reqid', 0x000000)\r\n code = options.get('code', 0x000000)\r\n\r\n size = struct.calcsize(NickNameFilterResponsePackage.fmt)\r\n package = struct.pack(NickNameFilterResponsePackage.fmt, cmd, size, reqid, code)\r\n\r\n else:\r\n\r\n package = struct.pack('x')\r\n \r\n\r\n return package\r\n\r\n\r\ndef unpack(buffer):\r\n package = {'size': -1}\r\n try:\r\n (cmd,) = struct.unpack('!i', buffer[:4])\r\n except Exception as err:\r\n logging.error('Unpack Packgae Failed. 
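A round-trip sketch of the fixed-size login frame packed above: '!2i16s16s' is a big-endian (cmd, size) header followed by two 16-byte string fields that struct zero-pads, which is why LoginPackage.parse strips trailing '\x00' on the way out.

```python
import struct

fmt = '!2i16s16s'
cmd, serverid, sig = 0x040001, 'chat-01', 'secret'    # illustrative values
size = struct.calcsize(fmt)                           # 40 bytes total
buf = struct.pack(fmt, cmd, size,
                  serverid.encode('utf-8'), sig.encode('utf-8'))

cmd2, size2, sid2, sig2 = struct.unpack(fmt, buf)
assert (cmd2, size2) == (cmd, size)
assert sid2.decode('utf-8').rstrip('\x00') == serverid
assert sig2.decode('utf-8').rstrip('\x00') == sig
```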
Buffer: {}'.format(buffer))\r\n cmd = 0x000000\r\n # logging.debug(' -- unpack cmd: {}'.format(cmd))\r\n\r\n if cmd == HeartingPackage.m_cmd:\r\n\r\n package = HeartingPackage(buffer)\r\n\r\n elif cmd == LoginPackage.m_cmd:\r\n\r\n package = LoginPackage(buffer)\r\n\r\n elif cmd == LoginResponsePackage.m_cmd:\r\n\r\n package = LoginResponsePackage(buffer)\r\n\r\n elif cmd == ChatFilterPackage.m_cmd:\r\n\r\n package = ChatFilterPackage(buffer)\r\n\r\n elif cmd == ChatWithJSONPackage.m_cmd:\r\n\r\n package = ChatWithJSONPackage(buffer)\r\n\r\n elif cmd == ChatFilterResponsePackage.m_cmd:\r\n\r\n package = ChatFilterResponsePackage(buffer)\r\n\r\n elif cmd == NickNameFilterRequestPackage.m_cmd:\r\n\r\n package = NickNameFilterRequestPackage(buffer)\r\n\r\n elif cmd == NickNameFilterResponsePackage.m_cmd:\r\n\r\n package = NickNameFilterResponsePackage(buffer)\r\n \r\n else:\r\n\r\n package = BasicStructPackage(buffer)\r\n \r\n left_buffer = buffer[package.size:]\r\n \r\n return package, left_buffer\r\n\r\n\r\nclass BasicStructPackage():\r\n m_cmd = 0x000000\r\n cmd = 0x000000\r\n size = 0x000000\r\n fmt = '!2i'\r\n\r\n def __init__(self, buffer):\r\n try:\r\n self.parse(buffer)\r\n except:\r\n pass\r\n\r\n def parse(self, buffer):\r\n cmd, size = struct.unpack(self.fmt, buffer)\r\n self.cmd = cmd\r\n self.size = size\r\n \r\n\r\n\r\nclass HeartingPackage(BasicStructPackage):\r\n m_cmd = 0x000001\r\n timestamp = 0\r\n\r\n def parse(self, buffer):\r\n cmd, size = struct.unpack(self.fmt, buffer)\r\n self.cmd = cmd\r\n self.size = size\r\n \r\n\r\nclass LoginPackage(BasicStructPackage):\r\n m_cmd = 0x040001\r\n fmt = '!2i16s16s'\r\n serverid = '' # chat server id \r\n sig = '' # login password\r\n\r\n def parse(self, buffer):\r\n cmd, size, serverid, sig = struct.unpack(self.fmt, buffer)\r\n self.cmd = cmd\r\n self.size = size\r\n self.serverid = serverid.decode('utf-8').rstrip('\\x00')\r\n self.sig = sig.decode('utf-8').rstrip('\\x00')\r\n # print('LoginPackage serverid: ', serverid)\r\n\r\n\r\nclass LoginResponsePackage(BasicStructPackage):\r\n m_cmd = 0x040002\r\n fmt = '!3i'\r\n code = 0 # 0 is successful, others is failed\r\n\r\n def parse(self, buffer):\r\n cmd, size, code = struct.unpack(self.fmt, buffer)\r\n self.cmd = cmd\r\n self.size = size\r\n self.code = code\r\n\r\n\r\nclass ChatFilterPackage(BasicStructPackage):\r\n m_cmd = 0x040003\r\n fmt = '!4i'\r\n msgid = 0x040000\r\n msgsize = 0x000000\r\n msg = '' # max 255 char\r\n msgbuffer = b''\r\n\r\n def parse(self, buffer):\r\n buffer_size = struct.calcsize(self.fmt)\r\n _fmt_buffer = buffer[:buffer_size]\r\n _left_buffer = buffer[buffer_size:]\r\n\r\n cmd, size, msgid, msgsize = struct.unpack(self.fmt, _fmt_buffer)\r\n\r\n self.cmd = cmd\r\n self.size = size\r\n self.msgid = msgid\r\n self.msgsize = msgsize\r\n \r\n if msgsize:\r\n self.msgbuffer = _left_buffer[:msgsize]\r\n else:\r\n self.msgbuffer = _left_buffer\r\n \r\n try:\r\n self.msg = self.msgbuffer.decode('utf-8')\r\n except:\r\n logging.error('Unpack Failed :: CMD= {}, Buffer= {}'.format(cmd, _left_buffer))\r\n self.msg = self.msgbuffer.decode('utf-8', \"ignore\")\r\n\r\n\r\n if len(self.msg) > 255:\r\n self.msg = self.msg[:255]\r\n\r\n\r\nclass ChatWithJSONPackage(BasicStructPackage):\r\n m_cmd = 0x041003\r\n fmt = '!4i'\r\n msgid = 0x040000\r\n roomid = 'none'\r\n loginname = ''\r\n msg = ''\r\n jsonsize = 0x000000\r\n jsonstr = ''\r\n json = {}\r\n\r\n def parse(self, buffer):\r\n buffer_size = struct.calcsize(self.fmt)\r\n _fmt_buffer = buffer[:buffer_size]\r\n 
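ChatFilterPackage above parses a fixed '!4i' header (cmd, size, msgid, msgsize) followed by msgsize bytes of UTF-8 text, with size counting the whole frame so the caller can re-slice the stream. A sketch of that layout on two concatenated frames; frame_chat is an illustrative helper, not part of the module.

```python
import struct

def frame_chat(cmd, msgid, text):
    payload = text.encode('utf-8')
    size = struct.calcsize('!4i') + len(payload)
    return struct.pack('!4i', cmd, size, msgid, len(payload)) + payload

buf = frame_chat(0x040003, 7, 'hello') + frame_chat(0x040003, 8, 'world')
head = struct.calcsize('!4i')
while buf:
    cmd, size, msgid, msgsize = struct.unpack('!4i', buf[:head])
    print(msgid, buf[head:head + msgsize].decode('utf-8'))
    buf = buf[size:]    # same advance as `left_buffer = buffer[package.size:]`
```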
_left_buffer = buffer[buffer_size:]\r\n\r\n cmd, size, msgid, jsonsize = struct.unpack(self.fmt, _fmt_buffer)\r\n self.cmd = cmd\r\n self.size = size\r\n self.msgid = msgid\r\n self.jsonsize = jsonsize\r\n \r\n if jsonsize:\r\n self.jsonbuffer = _left_buffer[:jsonsize]\r\n else:\r\n logging.warning('ChatWithJSONPackage :: Package No Specify Jsonsize (size={} msgid={})'.format(size, msgid))\r\n self.jsonbuffer = _left_buffer\r\n \r\n try:\r\n self.jsonstr = self.jsonbuffer.decode('utf-8')\r\n self.json = json.loads(self.jsonstr.strip(), strict=False)\r\n self.roomid = self.json.get('roomid', 'none')\r\n self.msg = self.json.get('msg', '')\r\n self.loginname = self.json.get('loginname', '')\r\n except Exception as e:\r\n afterignorejson = self.jsonbuffer.decode('utf-8', \"ignore\")\r\n logging.error('ChatWithJSONPackage :: Unpack Failed (JSON= {}, ignoreJSON= {}, jsonsize= {})'.format(self.jsonstr, afterignorejson, jsonsize))\r\n logging.error(traceback.format_exc())\r\n self.json = {}\r\n self.msg = '[Parsing Byte Failed]'\r\n self.msgid = msgid if msgid > 0 else -1\r\n if jsonsize and len(buffer) < 256:\r\n self.size = 0\r\n else:\r\n self.size = size\r\n\r\n\r\nclass ChatFilterResponsePackage(BasicStructPackage):\r\n m_cmd = 0x040004\r\n fmt = '!4i'\r\n msgid = 0x000000\r\n code = 0x000000 # 0:normal; 1:ads; 2:dirty words; 3:system failure\r\n\r\n def parse(self, buffer):\r\n buffer_size = struct.calcsize(self.fmt)\r\n cmd, size, msgid, code = struct.unpack(self.fmt, buffer[:buffer_size])\r\n self.cmd = cmd\r\n self.size = size\r\n self.msgid = msgid\r\n self.code = code\r\n\r\n\r\nclass NickNameFilterRequestPackage(BasicStructPackage):\r\n m_cmd = 0x040007\r\n fmt = '!3i'\r\n reqid = 0x000000\r\n nickname = ''\r\n\r\n def parse(self, buffer):\r\n buffer_size = struct.calcsize(self.fmt)\r\n _fmt_buffer = buffer[:buffer_size]\r\n _left_buffer = buffer[buffer_size:]\r\n\r\n cmd, size, reqid = struct.unpack(self.fmt, _fmt_buffer)\r\n _left_size = size - buffer_size\r\n self.cmd = cmd\r\n self.size = size\r\n self.reqid = reqid\r\n\r\n try:\r\n self.nickname = _left_buffer[:_left_size].decode('utf-8').replace(\"\\x00\", \"\")\r\n except:\r\n logging.error('Unpack NickNameFilterRequestPackage Failed :: CMD= {}, Buffer= {}'.format(cmd, _left_buffer))\r\n self.nickname = _left_buffer[:_left_size].decode('utf-8', \"ignore\").replace(\"\\x00\", \"\")\r\n\r\n\r\nclass NickNameFilterResponsePackage(BasicStructPackage):\r\n m_cmd = 0x040008\r\n fmt = '!4i'\r\n reqid = 0x000000\r\n code = 0x000000 # 0:normal; 1:ads; 2:dirty words; 3:invalid pattern; 4:system failure\r\n\r\n def parse(self, buffer):\r\n\r\n cmd, size, reqid, code = struct.unpack(self.fmt, buffer)\r\n self.cmd = cmd\r\n self.size = size\r\n self.reqid = reqid\r\n self.code = code\r\n", "sub_path": "tcpsocket/chat_package.py", "file_name": "chat_package.py", "file_ext": "py", "file_size_in_byte": 10096, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "struct.calcsize", "line_number": 14, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 15, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 22, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 23, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 29, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 30, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 40, "usage_type": "call"}, {"api_name": "struct.pack", 
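The JSON variant parsed by ChatWithJSONPackage wraps the same '!4i' header (cmd, size, msgid, jsonsize) around a UTF-8 JSON body. A minimal round trip using the field names that parse() looks up; values are illustrative.

```python
import json
import struct

body = json.dumps({'msg': 'hi', 'roomid': 'r1',
                   'loginname': 'alice'}).encode('utf-8')
head = struct.calcsize('!4i')                                  # 16 bytes
buf = struct.pack('!4i', 0x041003, head + len(body), 42, len(body)) + body

_, _, msgid, jsonsize = struct.unpack('!4i', buf[:head])
data = json.loads(buf[head:head + jsonsize].decode('utf-8'))
print(msgid, data['roomid'], data['msg'])                      # 42 r1 hi
```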
"line_number": 42, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 53, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 56, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 57, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 64, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 65, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 73, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 75, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 82, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 83, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 87, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 96, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 98, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 156, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 167, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 179, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 193, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 208, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 212, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 227, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 247, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 251, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 260, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 265, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 271, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 272, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 272, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 289, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 290, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 304, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 308, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 317, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 329, "usage_type": "call"}]} +{"seq_id": "116273460", "text": "# --------------------------------------------------------\n# Deformable Convolutional Networks\n# Copyright (c) 2017 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Haozhi Qi\n# --------------------------------------------------------\n\nimport cPickle\nimport mxnet as mx\nfrom utils.symbol import Symbol\nfrom operator_py.pyramid_proposal import *\nfrom operator_py.proposal_target import *\nfrom operator_py.fpn_roi_pooling import *\nfrom operator_py.box_annotator_ohem import *\nfrom operator_py.focal_loss_OptimizedVersion import *\n\nclass vgg_16_fpn_rcnn_l1_focal(Symbol):\n def __init__(self):\n \"\"\"\n Use __init__ to define parameter network needs\n \"\"\"\n self.shared_param_list = ['rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']\n self.shared_param_dict = {}\n for name in self.shared_param_list:\n self.shared_param_dict[name + '_weight'] = mx.sym.Variable(name + '_weight')\n self.shared_param_dict[name + '_bias'] = mx.sym.Variable(name + '_bias')\n\n self.eps = 2e-5\n self.use_global_stats = True\n self.workspace = 512\n self.res_deps = {'50': (3, 4, 6, 3), '101': (3, 4, 23, 3), '152': 
(3, 8, 36, 3), '200': (3, 24, 36, 3)}\n self.units = self.res_deps['50']\n self.filter_list = [256, 512, 1024, 2048]\n\n def residual_unit(self,data, num_filter, stride, dim_match, name):\n bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=self.eps, use_global_stats=self.use_global_stats, name=name + '_bn1')\n act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')\n conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0),\n no_bias=True, workspace=self.workspace, name=name + '_conv1')\n bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=self.eps, use_global_stats=self.use_global_stats, name=name + '_bn2')\n act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')\n conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=stride, pad=(1, 1),\n no_bias=True, workspace=self.workspace, name=name + '_conv2')\n bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=self.eps, use_global_stats=self.use_global_stats, name=name + '_bn3')\n act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')\n conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True,\n workspace=self.workspace, name=name + '_conv3')\n if dim_match:\n shortcut = data\n else:\n shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,\n workspace=self.workspace, name=name + '_sc')\n sum = mx.sym.ElementWiseSum(*[conv3, shortcut], name=name + '_plus')\n return sum\n\n\n def get_resnet_conv(self,data):\n conv_C0 = data\n # res1\n data_bn = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=self.eps, use_global_stats=self.use_global_stats, name='bn_data')\n conv0 = mx.sym.Convolution(data=data_bn, num_filter=64, kernel=(7, 7), stride=(2, 2), pad=(3, 3),\n no_bias=True, name=\"conv0\", workspace=self.workspace)\n bn0 = mx.sym.BatchNorm(data=conv0, fix_gamma=False, eps=self.eps, use_global_stats=self.use_global_stats, name='bn0')\n relu0 = mx.sym.Activation(data=bn0, act_type='relu', name='relu0')\n conv_C1 = relu0\n pool0 = mx.symbol.Pooling(data=relu0, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', name='pool0')\n \n # res2\n unit = self.residual_unit(data=pool0, num_filter=self.filter_list[0], stride=(1, 1), dim_match=False, name='stage1_unit1')\n for i in range(2, self.units[0] + 1):\n unit = self.residual_unit(data=unit, num_filter=self.filter_list[0], stride=(1, 1), dim_match=True,\n name='stage1_unit%s' % i)\n conv_C2 = unit\n\n # res3\n unit = self.residual_unit(data=unit, num_filter=self.filter_list[1], stride=(2, 2), dim_match=False, name='stage2_unit1')\n for i in range(2, self.units[1] + 1):\n unit = self.residual_unit(data=unit, num_filter=self.filter_list[1], stride=(1, 1), dim_match=True,\n name='stage2_unit%s' % i)\n conv_C3 = unit\n\n # res4\n unit = self.residual_unit(data=unit, num_filter=self.filter_list[2], stride=(2, 2), dim_match=False, name='stage3_unit1')\n for i in range(2, self.units[2] + 1):\n unit = self.residual_unit(data=unit, num_filter=self.filter_list[2], stride=(1, 1), dim_match=True,\n name='stage3_unit%s' % i)\n conv_C4 = unit\n\n # res5\n unit = self.residual_unit(data=unit, num_filter=self.filter_list[3], stride=(2, 2), dim_match=False, name='stage4_unit1')\n for i in range(2, self.units[3] + 1):\n unit = self.residual_unit(data=unit, num_filter=self.filter_list[3], stride=(1, 1), dim_match=True,\n name='stage4_unit%s' % 
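A standalone shape check for the pre-activation bottleneck pattern in residual_unit above: 1x1 reduce, 3x3 with the unit's stride, 1x1 expand, plus a projected shortcut when dim_match is False. This is a sketch in the same legacy MXNet 1.x symbol API as the file, not the method itself.

```python
import mxnet as mx

data = mx.sym.Variable('data')
act = mx.sym.Activation(mx.sym.BatchNorm(data, fix_gamma=False), act_type='relu')
c1 = mx.sym.Convolution(act, num_filter=128, kernel=(1, 1), no_bias=True)
c2 = mx.sym.Convolution(
    mx.sym.Activation(mx.sym.BatchNorm(c1, fix_gamma=False), act_type='relu'),
    num_filter=128, kernel=(3, 3), stride=(2, 2), pad=(1, 1), no_bias=True)
c3 = mx.sym.Convolution(
    mx.sym.Activation(mx.sym.BatchNorm(c2, fix_gamma=False), act_type='relu'),
    num_filter=512, kernel=(1, 1), no_bias=True)
sc = mx.sym.Convolution(act, num_filter=512, kernel=(1, 1), stride=(2, 2),
                        no_bias=True)      # projection shortcut (dim_match=False)
out = c3 + sc
print(out.infer_shape(data=(1, 256, 56, 56))[1][0])   # (1, 512, 28, 28)
```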
i)\n conv_C5 = unit\n\n conv_feat = [conv_C5, conv_C4, conv_C3, conv_C2, conv_C1, conv_C0]\n return conv_feat\n #return conv_C0, conv_C1, conv_C2, conv_C3, conv_C4, conv_C5\n def get_vgg_conv(self,data):\n \"\"\"\n shared convolutional layers\n :param data: Symbol\n :return: Symbol\n \"\"\"\n # group 1\n conv1_1 = mx.symbol.Convolution(\n data=data, kernel=(3, 3), pad=(1, 1), num_filter=64, workspace=2048, name=\"conv1_1\")\n relu1_1 = mx.symbol.Activation(data=conv1_1, act_type=\"relu\", name=\"relu1_1\")\n conv1_2 = mx.symbol.Convolution(\n data=relu1_1, kernel=(3, 3), pad=(1, 1), num_filter=64, workspace=2048, name=\"conv1_2\")\n relu1_2 = mx.symbol.Activation(data=conv1_2, act_type=\"relu\", name=\"relu1_2\")\n pool1 = mx.symbol.Pooling(\n data=relu1_2, pool_type=\"max\", kernel=(2, 2), stride=(2, 2), name=\"pool1\")\n # group 2\n conv2_1 = mx.symbol.Convolution(\n data=pool1, kernel=(3, 3), pad=(1, 1), num_filter=128, workspace=2048, name=\"conv2_1\")\n relu2_1 = mx.symbol.Activation(data=conv2_1, act_type=\"relu\", name=\"relu2_1\")\n conv2_2 = mx.symbol.Convolution(\n data=relu2_1, kernel=(3, 3), pad=(1, 1), num_filter=128, workspace=2048, name=\"conv2_2\")\n relu2_2 = mx.symbol.Activation(data=conv2_2, act_type=\"relu\", name=\"relu2_2\")\n pool2 = mx.symbol.Pooling(\n data=relu2_2, pool_type=\"max\", kernel=(2, 2), stride=(2, 2), name=\"pool2\")\n # group 3\n conv3_1 = mx.symbol.Convolution(\n data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name=\"conv3_1\")\n relu3_1 = mx.symbol.Activation(data=conv3_1, act_type=\"relu\", name=\"relu3_1\")\n conv3_2 = mx.symbol.Convolution(\n data=relu3_1, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name=\"conv3_2\")\n relu3_2 = mx.symbol.Activation(data=conv3_2, act_type=\"relu\", name=\"relu3_2\")\n conv3_3 = mx.symbol.Convolution(\n data=relu3_2, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name=\"conv3_3\")\n relu3_3 = mx.symbol.Activation(data=conv3_3, act_type=\"relu\", name=\"relu3_3\")\n pool3 = mx.symbol.Pooling(\n data=relu3_3, pool_type=\"max\", kernel=(2, 2), stride=(2, 2), name=\"pool3\")\n # group 4\n conv4_1 = mx.symbol.Convolution(\n data=pool3, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name=\"conv4_1\")\n relu4_1 = mx.symbol.Activation(data=conv4_1, act_type=\"relu\", name=\"relu4_1\")\n conv4_2 = mx.symbol.Convolution(\n data=relu4_1, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name=\"conv4_2\")\n relu4_2 = mx.symbol.Activation(data=conv4_2, act_type=\"relu\", name=\"relu4_2\")\n conv4_3 = mx.symbol.Convolution(\n data=relu4_2, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name=\"conv4_3\")\n relu4_3 = mx.symbol.Activation(data=conv4_3, act_type=\"relu\", name=\"relu4_3\")\n pool4 = mx.symbol.Pooling(\n data=relu4_3, pool_type=\"max\", kernel=(2, 2), stride=(2, 2), name=\"pool4\")\n # group 5\n conv5_1 = mx.symbol.Convolution(\n data=pool4, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name=\"conv5_1\")\n relu5_1 = mx.symbol.Activation(data=conv5_1, act_type=\"relu\", name=\"relu5_1\")\n conv5_2 = mx.symbol.Convolution(\n data=relu5_1, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name=\"conv5_2\")\n relu5_2 = mx.symbol.Activation(data=conv5_2, act_type=\"relu\", name=\"relu5_2\")\n conv5_3 = mx.symbol.Convolution(\n data=relu5_2, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name=\"conv5_3\")\n relu5_3 = mx.symbol.Activation(data=conv5_3, act_type=\"relu\", name=\"relu5_3\")\n conv_feat = 
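A quick stride check for the VGG-16 stack above: four 2x2 max-pools sit before conv5, so the returned list [relu5_3, relu4_3, relu3_3, relu2_2, relu1_2] runs at strides 16/8/4/2/1 relative to the input. This probe assumes the module and its imports resolve; get_vgg_conv only composes symbols, so infer_shape needs no weights.

```python
import mxnet as mx

net = vgg_16_fpn_rcnn_l1_focal()
for f in net.get_vgg_conv(mx.sym.Variable('data')):
    print(f.infer_shape(data=(1, 3, 224, 224))[1][0])
# (1, 512, 14, 14), (1, 512, 28, 28), (1, 256, 56, 56),
# (1, 128, 112, 112), (1, 64, 224, 224)
```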
[relu5_3, relu4_3, relu3_3, relu2_2, relu1_2]\n return conv_feat\n def get_resnet_conv_down(self,conv_feat,dim = 256):\n # C5 to P5, 1x1 dimension reduction to 256\n P5 = mx.symbol.Convolution(data=conv_feat[0], kernel=(1, 1), num_filter=dim, name=\"P5_lateral\")\n\n # P5 2x upsampling + C4 = P4\n P5_up = mx.symbol.UpSampling(P5, scale=2, sample_type='nearest', workspace=512, name='P5_upsampling', num_args=1)\n P4_la = mx.symbol.Convolution(data=conv_feat[1], kernel=(1, 1), num_filter=dim, name=\"P4_lateral\")\n P5_clip = mx.symbol.Crop(*[P5_up, P4_la], name=\"P4_clip\")\n P4 = mx.sym.ElementWiseSum(*[P5_clip, P4_la], name=\"P4_sum\")\n P4 = mx.symbol.Convolution(data=P4, kernel=(3, 3), pad=(1, 1), num_filter=dim, name=\"P4_aggregate\")\n\n # P4 2x upsampling + C3 = P3\n P4_up = mx.symbol.UpSampling(P4, scale=2, sample_type='nearest', workspace=512, name='P4_upsampling', num_args=1)\n P3_la = mx.symbol.Convolution(data=conv_feat[2], kernel=(1, 1), num_filter=dim, name=\"P3_lateral\")\n P4_clip = mx.symbol.Crop(*[P4_up, P3_la], name=\"P3_clip\")\n P3 = mx.sym.ElementWiseSum(*[P4_clip, P3_la], name=\"P3_sum\")\n P3 = mx.symbol.Convolution(data=P3, kernel=(3, 3), pad=(1, 1), num_filter=dim, name=\"P3_aggregate\")\n\n # P3 2x upsampling + C2 = P2\n P3_up = mx.symbol.UpSampling(P3, scale=2, sample_type='nearest', workspace=512, name='P3_upsampling', num_args=1)\n P2_la = mx.symbol.Convolution(data=conv_feat[3], kernel=(1, 1), num_filter=dim, name=\"P2_lateral\")\n P3_clip = mx.symbol.Crop(*[P3_up, P2_la], name=\"P2_clip\")\n P2 = mx.sym.ElementWiseSum(*[P3_clip, P2_la], name=\"P2_sum\")\n P2 = mx.symbol.Convolution(data=P2, kernel=(3, 3), pad=(1, 1), num_filter=dim, name=\"P2_aggregate\")\n \n P2_up = mx.symbol.UpSampling(P2, scale=2, sample_type='nearest', workspace=512, name='P2_upsampling', num_args=1)\n P1_la = mx.symbol.Convolution(data=conv_feat[4], kernel=(1, 1), num_filter=dim, name=\"P1_lateral\")\n P2_clip = mx.symbol.Crop(*[P2_up, P1_la], name=\"P1_clip\")\n P1 = mx.sym.ElementWiseSum(*[P2_clip, P1_la], name=\"P1_sum\")\n P1 = mx.symbol.Convolution(data=P1, kernel=(3, 3), pad=(1, 1), num_filter=dim, name=\"P1_aggregate\")\n '''\n P1_up = mx.symbol.UpSampling(P1, scale=2, sample_type='nearest', workspace=512, name='P1_upsampling', num_args=1)\n P0_la = mx.symbol.Convolution(data=conv_feat[5], kernel=(1, 1), num_filter=dim, name=\"P0_lateral\")\n P1_clip = mx.symbol.Crop(*[P1_up, P0_la], name=\"P0_clip\")\n P0 = mx.sym.ElementWiseSum(*[P1_clip, P0_la], name=\"P0_sum\")\n P0 = mx.symbol.Convolution(data=P0, kernel=(3, 3), pad=(1, 1), num_filter=dim, name=\"P0_aggregate\")\n '''\n # P6 2x subsampling P5\n P6 = mx.symbol.Pooling(data=P5, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', name='P6_subsampling')\n\n #conv_fpn_feat = dict()\n #conv_fpn_feat.update({\"stride64\":P6, \"stride32\":P5, \"stride16\":P4, \"stride8\":P3, \"stride4\":P2, \"stride2\":P1, \"stride1\":P0})\n #return conv_fpn_feat, [P6, P5, P4, P3, P2, P1, P0]\n return P1,P2, P3, P4, P5, P6\n\n def get_rpn_subnet(self, data, num_anchors, suffix):\n rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name='rpn_conv_' + suffix,\n weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])\n rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu_' + suffix)\n rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix,\n 
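One top-down merge step as implemented in get_resnet_conv_down above: 2x nearest upsampling of the coarser map, a 1x1 lateral projection of the finer feature, a Crop to reconcile odd sizes, ElementWiseSum, then a 3x3 convolution to smooth upsampling artifacts. Shapes below are illustrative.

```python
import mxnet as mx

P5 = mx.sym.Variable('P5')    # coarse map, e.g. (1, 256, 13, 13)
C4 = mx.sym.Variable('C4')    # finer backbone map, e.g. (1, 512, 25, 25)
P5_up = mx.sym.UpSampling(P5, scale=2, sample_type='nearest', num_args=1)
P4_la = mx.sym.Convolution(C4, kernel=(1, 1), num_filter=256)
P5_clip = mx.sym.Crop(*[P5_up, P4_la])             # 26x26 cropped to 25x25
P4_sum = mx.sym.ElementWiseSum(*[P5_clip, P4_la])
P4 = mx.sym.Convolution(P4_sum, kernel=(3, 3), pad=(1, 1), num_filter=256)
print(P4.infer_shape(P5=(1, 256, 13, 13), C4=(1, 512, 25, 25))[1][0])
# (1, 256, 25, 25)
```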
weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])\n rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix,\n weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])\n\n # n x (2*A) x H x W => n x 2 x (A*H*W)\n rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix)\n rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix)\n rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name='rpn_cls_prob_' + suffix)\n rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_t_' + suffix)\n rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix)\n return rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred\n\n def get_symbol(self, cfg, is_train=True):\n\n # config alias for convenient\n num_classes = cfg.dataset.NUM_CLASSES\n num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)\n\n data = mx.sym.Variable(name=\"data\")\n im_info = mx.sym.Variable(name=\"im_info\")\n\n # shared convolutional layers\n #res0, res1, res2, res3, res4, res5 = self.get_resnet_backbone(data)\n #fpn_p0, fpn_p1, fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_fpn_feature(res0, res1, res2, res3, res4, res5)\n #fpn_p0, fpn_p1, fpn_p2, fpn_p3,fpn_p4 = self.get_fpn_feature(res0, res1, res2, res3, res4, res5)\n conv_feat = self.get_vgg_conv(data)\n fpn_p1, fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_resnet_conv_down(conv_feat,128)\n\n #rpn_cls_score_p0, rpn_prob_p0, rpn_bbox_loss_p0, rpn_bbox_pred_p0 = self.get_rpn_subnet(fpn_p0, cfg.network.NUM_ANCHORS, 'p0')\n rpn_cls_score_p1, rpn_prob_p1, rpn_bbox_loss_p1, rpn_bbox_pred_p1 = self.get_rpn_subnet(fpn_p1, cfg.network.NUM_ANCHORS, 'p1')\n rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2 = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')\n rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3 = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')\n rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4 = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')\n rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5 = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')\n rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6 = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')\n\n rpn_cls_prob_dict = {\n 'rpn_cls_prob_stride64': rpn_prob_p6,\n 'rpn_cls_prob_stride32': rpn_prob_p5,\n 'rpn_cls_prob_stride16': rpn_prob_p4,\n 'rpn_cls_prob_stride8': rpn_prob_p3,\n 'rpn_cls_prob_stride4': rpn_prob_p2,\n 'rpn_cls_prob_stride2': rpn_prob_p1,\n #'rpn_cls_prob_stride1': rpn_prob_p0,\n }\n rpn_bbox_pred_dict = {\n 'rpn_bbox_pred_stride64': rpn_bbox_pred_p6,\n 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5,\n 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4,\n 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3,\n 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2,\n 'rpn_bbox_pred_stride2': rpn_bbox_pred_p1,\n #'rpn_bbox_pred_stride1': rpn_bbox_pred_p0,\n }\n arg_dict = dict(rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items())\n\n if is_train:\n rpn_label = mx.sym.Variable(name='label')\n rpn_bbox_target = mx.sym.Variable(name='bbox_target')\n rpn_bbox_weight = 
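Why get_rpn_subnet reshapes the scores from (n, 2*A, H, W) to (n, 2, A*H*W) before the softmax: each anchor owns one background/foreground channel pair, and the channel-mode softmax then normalizes over axis 1. A pure-numpy illustration of the layout:

```python
import numpy as np

n, A, H, W = 1, 3, 2, 2
score = np.arange(n * 2 * A * H * W, dtype=float).reshape(n, 2 * A, H, W)
t = score.reshape(n, 2, -1, W).reshape(n, 2, -1)   # mirrors the two Reshape ops
prob = np.exp(t) / np.exp(t).sum(axis=1, keepdims=True)
assert prob.shape == (n, 2, A * H * W)
assert np.allclose(prob.sum(axis=1), 1.0)          # one bg/fg pair per anchor
```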
mx.sym.Variable(name='bbox_weight')\n gt_boxes = mx.sym.Variable(name=\"gt_boxes\")\n\n rpn_cls_score = mx.sym.Concat( rpn_cls_score_p1,rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)\n rpn_bbox_loss = mx.sym.Concat( rpn_bbox_loss_p1,rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)\n\n # RPN classification loss\n rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid',\n use_ignore=True, ignore_label=-1, name='rpn_cls_prob')\n # bounding box regression\n rpn_bbox_loss = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target))\n rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)\n\n aux_dict = {\n 'op_type': 'pyramid_proposal', 'name': 'rois',\n 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),\n 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),\n 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N,\n 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE\n }\n\n # ROI proposal\n rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))\n # ROI proposal target\n gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')\n rois, label, bbox_target, bbox_weight \\\n = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES,\n batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)\n else:\n aux_dict = {\n 'op_type': 'pyramid_proposal', 'name': 'rois',\n 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),\n 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),\n 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N,\n 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE\n }\n # ROI proposal\n rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))\n\n roi_pool = mx.symbol.Custom(data_p1=fpn_p1,data_p2=fpn_p2, data_p3=fpn_p3,data_p4=fpn_p4,data_p5=fpn_p5,\n rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling',feat_strides='(2,4,8,16,32)')\n\n # 2 fc\n fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)\n fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')\n\n fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)\n fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')\n\n # cls_score/bbox_pred\n cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)\n bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4)\n\n if is_train:\n if cfg.TRAIN.ENABLE_OHEM:\n labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,\n num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,\n cls_score=cls_score, bbox_pred=bbox_pred, labels=label,\n bbox_targets=bbox_target, bbox_weights=bbox_weight)\n cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, 
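The mx.sym.smooth_l1(data, scalar=sigma) calls above compute, elementwise, 0.5*(sigma*x)^2 when |x| < 1/sigma^2 and |x| - 0.5/sigma^2 otherwise, so scalar=3.0 (RPN) shrinks the quadratic zone to |x| < 1/9 while scalar=1.0 (RCNN head) is plain smooth L1. A numpy reference of that definition:

```python
import numpy as np

def smooth_l1(x, sigma):
    x = np.asarray(x, dtype=float)
    quad = np.abs(x) < 1.0 / sigma ** 2
    return np.where(quad, 0.5 * (sigma * x) ** 2, np.abs(x) - 0.5 / sigma ** 2)

print(smooth_l1([-2.0, -0.05, 0.0, 0.05, 2.0], sigma=3.0))
# [1.94444  0.01125  0.       0.01125  1.94444]
```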
ignore_label=-1)\n bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))\n bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)\n rcnn_label = labels_ohem\n elif cfg.TRAIN.ENABLE_FOCAL_LOSS:\n cls_prob = mx.sym.Custom(op_type='FocalLoss', name='cls_prob', data=cls_score, labels=label, gamma= 2,alpha = 0.25)\n # cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')\n bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0,\n data=(bbox_pred - bbox_target))\n bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)\n rcnn_label = label\n else:\n cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')\n bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))\n bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)\n rcnn_label = label\n\n # reshape output\n rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')\n cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')\n bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')\n # group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, mx.sym.BlockGrad(cls_prob), mx.sym.BlockGrad(bbox_loss), mx.sym.BlockGrad(rcnn_label)])\n group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])\n else:\n cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)\n cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')\n bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')\n group = mx.sym.Group([rois, cls_prob, bbox_pred])\n\n self.sym = group\n return group\n\n def init_weight_rcnn(self, cfg, arg_params, aux_params):\n arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])\n arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])\n arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])\n arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])\n arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])\n arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])\n arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])\n arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])\n\n def init_weight_fpn(self, cfg, arg_params, aux_params):\n\n for i in [1,2,3,4,5]:\n arg_params['P'+str(i)+'_lateral_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['P'+str(i)+'_lateral_weight'])\n arg_params['P'+str(i)+'_lateral_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['P'+str(i)+'_lateral_bias']) \n\n for i in [1,2,3,4]:\n arg_params['P'+str(i)+'_aggregate_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['P'+str(i)+'_aggregate_weight'])\n arg_params['P'+str(i)+'_aggregate_bias'] = 
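The 'FocalLoss' custom op is imported from focal_loss_OptimizedVersion and not shown in this file; below is a reference sketch of the standard focal loss it is named after, FL(p_t) = -alpha * (1 - p_t)^gamma * log(p_t) on softmax probabilities, using the gamma=2, alpha=0.25 defaults passed above. The helper and test values are illustrative, not the op's actual implementation.

```python
import numpy as np

def focal_loss(scores, labels, gamma=2.0, alpha=0.25):
    scores = scores - scores.max(axis=1, keepdims=True)   # stable softmax
    p = np.exp(scores) / np.exp(scores).sum(axis=1, keepdims=True)
    p_t = p[np.arange(len(labels)), labels]
    return -alpha * (1.0 - p_t) ** gamma * np.log(p_t)

scores = np.array([[4.0, 0.0, 0.0], [0.1, 0.0, 0.2]])
labels = np.array([0, 2])
print(focal_loss(scores, labels))   # confident row ~0, ambiguous row larger
```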
mx.nd.zeros(shape=self.arg_shape_dict['P'+str(i)+'_aggregate_bias']) \n\n def init_weight(self, cfg, arg_params, aux_params):\n for name in self.shared_param_list:\n arg_params[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight'])\n arg_params[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias'])\n self.init_weight_rcnn(cfg, arg_params, aux_params)\n self.init_weight_fpn(cfg, arg_params, aux_params)\n", "sub_path": "fpn/symbols/alex_fpn_rcnn_l0_focal.py", "file_name": "alex_fpn_rcnn_l0_focal.py", "file_ext": "py", "file_size_in_byte": 25773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "utils.symbol.Symbol", "line_number": 17, "usage_type": "name"}, {"api_name": "mxnet.sym.Variable", "line_number": 25, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 25, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Variable", "line_number": 26, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 26, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 36, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 36, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 37, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 37, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 38, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 38, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 40, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 40, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 41, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 41, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 42, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 42, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 44, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 44, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 45, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 45, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 46, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 46, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 51, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 51, "usage_type": "attribute"}, {"api_name": "mxnet.sym.ElementWiseSum", "line_number": 53, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 53, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 60, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 60, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 61, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 61, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 63, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 63, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 64, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 64, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Pooling", "line_number": 66, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 66, "usage_type": "attribute"}, {"api_name": 
"mxnet.symbol.Convolution", "line_number": 106, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 106, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 108, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 108, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 109, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 109, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 111, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 111, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Pooling", "line_number": 112, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 112, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 115, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 115, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 117, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 117, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 118, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 118, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 120, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 120, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Pooling", "line_number": 121, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 121, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 124, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 124, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 126, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 126, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 127, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 127, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 129, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 129, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 130, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 130, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 132, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 132, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Pooling", "line_number": 133, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 133, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 136, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 136, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 138, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 138, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 139, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 139, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 141, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 141, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 142, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 142, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", 
"line_number": 144, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 144, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Pooling", "line_number": 145, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 145, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 148, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 148, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 150, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 150, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 151, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 151, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 153, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 153, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 154, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 154, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Activation", "line_number": 156, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 156, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 161, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 161, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.UpSampling", "line_number": 164, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 164, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 165, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 165, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Crop", "line_number": 166, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 166, "usage_type": "attribute"}, {"api_name": "mxnet.sym.ElementWiseSum", "line_number": 167, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 167, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 168, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 168, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.UpSampling", "line_number": 171, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 171, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 172, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 172, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Crop", "line_number": 173, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 173, "usage_type": "attribute"}, {"api_name": "mxnet.sym.ElementWiseSum", "line_number": 174, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 174, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 175, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 175, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.UpSampling", "line_number": 178, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 178, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 179, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 179, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Crop", "line_number": 180, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 180, "usage_type": "attribute"}, {"api_name": "mxnet.sym.ElementWiseSum", "line_number": 181, "usage_type": "call"}, 
{"api_name": "mxnet.sym", "line_number": 181, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 182, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 182, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.UpSampling", "line_number": 184, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 184, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 185, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 185, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Crop", "line_number": 186, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 186, "usage_type": "attribute"}, {"api_name": "mxnet.sym.ElementWiseSum", "line_number": 187, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 187, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Convolution", "line_number": 188, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 188, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Pooling", "line_number": 197, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 197, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 205, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 205, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 207, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 207, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 208, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 208, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 210, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 210, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 214, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 214, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 215, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 215, "usage_type": "attribute"}, {"api_name": "mxnet.sym.SoftmaxActivation", "line_number": 216, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 216, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 217, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 217, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 218, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 218, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Variable", "line_number": 227, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 227, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Variable", "line_number": 228, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 228, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Variable", "line_number": 266, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 266, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Variable", "line_number": 267, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 267, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Variable", "line_number": 268, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 268, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Variable", "line_number": 269, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 269, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Concat", "line_number": 271, "usage_type": 
"call"}, {"api_name": "mxnet.sym", "line_number": 271, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Concat", "line_number": 272, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 272, "usage_type": "attribute"}, {"api_name": "mxnet.sym.SoftmaxOutput", "line_number": 275, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 275, "usage_type": "attribute"}, {"api_name": "mxnet.sym.smooth_l1", "line_number": 278, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 278, "usage_type": "attribute"}, {"api_name": "mxnet.sym.MakeLoss", "line_number": 279, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 279, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Custom", "line_number": 290, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 290, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 292, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 292, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Custom", "line_number": 294, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 294, "usage_type": "attribute"}, {"api_name": "cPickle.dumps", "line_number": 295, "usage_type": "call"}, {"api_name": "mxnet.sym.Custom", "line_number": 305, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 305, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.Custom", "line_number": 307, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 307, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.FullyConnected", "line_number": 311, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 311, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 312, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 312, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.FullyConnected", "line_number": 314, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 314, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 315, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 315, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.FullyConnected", "line_number": 318, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 318, "usage_type": "attribute"}, {"api_name": "mxnet.symbol.FullyConnected", "line_number": 319, "usage_type": "call"}, {"api_name": "mxnet.symbol", "line_number": 319, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Custom", "line_number": 323, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 323, "usage_type": "attribute"}, {"api_name": "mxnet.sym.SoftmaxOutput", "line_number": 327, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 327, "usage_type": "attribute"}, {"api_name": "mxnet.sym.smooth_l1", "line_number": 328, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 328, "usage_type": "attribute"}, {"api_name": "mxnet.sym.MakeLoss", "line_number": 329, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 329, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Custom", "line_number": 332, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 332, "usage_type": "attribute"}, {"api_name": "mxnet.sym.smooth_l1", "line_number": 334, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 334, "usage_type": "attribute"}, {"api_name": "mxnet.sym.MakeLoss", "line_number": 336, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 336, "usage_type": 
"attribute"}, {"api_name": "mxnet.sym.SoftmaxOutput", "line_number": 339, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 339, "usage_type": "attribute"}, {"api_name": "mxnet.sym.smooth_l1", "line_number": 340, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 340, "usage_type": "attribute"}, {"api_name": "mxnet.sym.MakeLoss", "line_number": 341, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 341, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 345, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 345, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 346, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 346, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 347, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 347, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Group", "line_number": 349, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 349, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BlockGrad", "line_number": 349, "usage_type": "call"}, {"api_name": "mxnet.sym.SoftmaxActivation", "line_number": 351, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 351, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 352, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 352, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 353, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 353, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Group", "line_number": 354, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 354, "usage_type": "attribute"}, {"api_name": "mxnet.random.normal", "line_number": 360, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 360, "usage_type": "attribute"}, {"api_name": "mxnet.nd.zeros", "line_number": 361, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 361, "usage_type": "attribute"}, {"api_name": "mxnet.random.normal", "line_number": 362, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 362, "usage_type": "attribute"}, {"api_name": "mxnet.nd.zeros", "line_number": 363, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 363, "usage_type": "attribute"}, {"api_name": "mxnet.random.normal", "line_number": 364, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 364, "usage_type": "attribute"}, {"api_name": "mxnet.nd.zeros", "line_number": 365, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 365, "usage_type": "attribute"}, {"api_name": "mxnet.random.normal", "line_number": 366, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 366, "usage_type": "attribute"}, {"api_name": "mxnet.nd.zeros", "line_number": 367, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 367, "usage_type": "attribute"}, {"api_name": "mxnet.random.normal", "line_number": 372, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 372, "usage_type": "attribute"}, {"api_name": "mxnet.nd.zeros", "line_number": 373, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 373, "usage_type": "attribute"}, {"api_name": "mxnet.random.normal", "line_number": 376, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 376, "usage_type": "attribute"}, {"api_name": "mxnet.nd.zeros", "line_number": 377, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 
377, "usage_type": "attribute"}, {"api_name": "mxnet.random.normal", "line_number": 381, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 381, "usage_type": "attribute"}, {"api_name": "mxnet.nd.zeros", "line_number": 382, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 382, "usage_type": "attribute"}]} +{"seq_id": "624225420", "text": "import logging\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.interpolate import interp1d\r\n\r\ndef sigmoid(x):\r\n return 1/(1 + np.exp(-x)) \r\n\r\ndef Init_logging():\r\n log = logging.getLogger()\r\n log.setLevel(logging.INFO)\r\n logFormatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s')\r\n consoleHandler = logging.StreamHandler()\r\n consoleHandler.setFormatter(logFormatter)\r\n log.addHandler(consoleHandler)\r\n\r\n\r\ndef linear_interpolation(l, r, alpha):\r\n return l + alpha * (r - l)\r\n\r\n\r\nclass PiecewiseSchedule():\r\n def __init__(self, \r\n endpoints, \r\n interpolation=linear_interpolation, \r\n outside_value=None):\r\n \"\"\"\r\n Piecewise Linear learning schedule.\r\n \"\"\"\r\n idxes = [e[0] for e in endpoints]\r\n assert idxes == sorted(idxes)\r\n self._interpolation = interpolation\r\n self._outside_value = outside_value\r\n self._endpoints = endpoints\r\n\r\n def value(self, t):\r\n for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):\r\n if l_t <= t and t < r_t:\r\n alpha = float(t - l_t) / (r_t - l_t)\r\n return self._interpolation(l, r, alpha)\r\n\r\n ### t does not belong to any of the pieces, so doom.\r\n assert self._outside_value is not None\r\n return self._outside_value\r\n\r\n def __call__(self, t):\r\n '''\r\n for compatibility with keras callbacks\r\n '''\r\n return self.value(t)", "sub_path": "libs/soft/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1532, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.exp", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "363638987", "text": "import sqlite3\nfrom sqlite3 import Error\n\n\"\"\"\nthis module is used for managing data base connections and every thing related;\nthis applies for all(in case True that means the query worked False if else)\n\nfor all the methods declared below, have one thing in common ::\n True ==> if the methods works fine\n False ==> else\n\nneed improvements, so go ahead.\n\"\"\"\n\n\nclass Db(object):\n def __init__(self, db):\n self.db = db\n self.conn = \"\"\n self.query = \"\"\n self.cur = \"\"\n\n def connect(self):\n \"\"\"\n function used for starting the connection, to\n the data base -db-.\n \"\"\"\n try:\n self.conn = sqlite3.Connection(self.db)\n self.cur = self.conn.cursor()\n return True\n\n except Error:\n self.conn.close()\n return False\n\n def close(self):\n \"\"\"\n at the end of what ever you are doing in the DB, use this\n method to close all connections.\n\n might say the garbage collector will take care, but no\n clean code is way better for both meaning and performance.\n \"\"\"\n try:\n self.cur.close()\n self.conn.close()\n return True\n\n except Error:\n return False\n\n def createTable(self, table_query):\n \"\"\"\n this 
function is made specially for creating tables,\n you can use the other methods listed below, but this way i know\n exactly what i am doing.\n\n table_query == the query used to create the table\n\n table = \\\"\"\"\n CREATE TABLE IF NOT EXISTS profile(\n id INTEGER PRIMARY KEY,\n nom TEXT NOT NULL,\n prenom TEXT NOT NULL\n );\n \\\"\"\"\n \"\"\"\n try:\n self.cur.execute(table_query)\n self.conn.commit()\n return True\n\n except Error:\n return False\n\n def prepareQuery(self, query):\n \"\"\"\n At first, call the method with ::\n\n query = INSERT INTO tasks(name,priority,status_id,project_id,begin_date,end_date)\n VALUES(?,?,?,?,?,?)\n\n insert = prepareQuery(query)\n just to prepare the query for whatever you need,\n like :: inserting multiple elements.\n\n then call the second method like\n insertRow(list of values in the same order)\n\n this inner method can be called as many times as you need.\n before using any of the methods listed below, you should call this one first.\n \"\"\"\n try:\n self.query = query\n return True\n\n except Error:\n return False\n\n def insertRow(self, row):\n \"\"\"\n before using it, call prepareQuery() first, then call this\n one with a list of the parameters (values).\n\n as said, made for inserting multiple data rows.\n\n Ex:\n ---\n prepareQuery(query)\n insertRow([data1, data2, ...........])\n \"\"\"\n try:\n self.cur.execute(self.query, row)\n self.conn.commit()\n return True\n\n except Error:\n return False\n\n def justQuery(self, data=None):\n \"\"\"\n before using it, call prepareQuery() first, then call this\n one with a list of the parameters (values).\n\n for other query types like delete or update, or any other kind of query\n that does not produce any output.\n\n Ex:\n ---\n query = UPDATE [YOUR TABLE]\n SET [COLUMN1]= ? -VALUE1-\n WHERE [COLUMN2]=? -VALUE2-\n\n prepareQuery(query)\n justQuery([data1, data2])\n \"\"\"\n\n try:\n if data is None:\n self.cur.execute(self.query)\n else:\n self.cur.execute(self.query, data)\n\n self.conn.commit()\n return True\n except Error:\n return False\n\n def extractData(self, data=None):\n \"\"\"\n before using it, call prepareQuery() first, then call this\n one with a list of the parameters (values).\n\n for data extraction; note that it does not give the column names,\n so you should know them.\n\n in case it returns None it means that there is something wrong with the query\n or it is just that the query output is empty.\n\n Ex1:\n ---\n query = SELECT * FROM [YOUR TABLE]\n WHERE [COLUMN2]=? -VALUE1-\n\n prepareQuery(query)\n extractData([data1])\n\n\n Ex2:\n ---\n query = SELECT * FROM [YOUR TABLE]\n\n prepareQuery(query)\n extractData()\n \"\"\"\n try:\n if data is not None:\n self.cur.execute(self.query, data)\n else:\n self.cur.execute(self.query)\n\n for row in self.cur.fetchall():\n yield row\n\n except Error:\n yield None\n\n def notTableOutPutQuery(self, data=None):\n \"\"\"\n before using it, call prepareQuery() first, then call this\n one with a list of the parameters (values).\n\n this is made for -- non-table -- output queries\n like:\n select count(*)\n select max()....\n without Group By\n\n in case it returns None it means that there is something wrong with the query.\n\n Ex1:\n ----\n query = SELECT count(*) FROM [YOUR TABLE]\n prepareQuery(query)\n notTableOutPutQuery()\n\n Ex2:\n ----\n query = SELECT count(*) FROM [YOUR TABLE]\n WHERE [COLUMN]=? 
-VALUE-\n\n prepareQuery(query)\n notTableOutPutQuery([data])\n\n \"\"\"\n try:\n if data is not None:\n self.cur.execute(self.query, data)\n else:\n self.cur.execute(self.query)\n\n d = self.cur.fetchall()[0]\n return d[0]\n\n except Error:\n return None\n\n\nif __name__ == \"__main__\":\n pass\n", "sub_path": "easySqlite3.py", "file_name": "easySqlite3.py", "file_ext": "py", "file_size_in_byte": 5980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sqlite3.Connection", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 33, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 50, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 74, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 98, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 118, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 186, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 226, "usage_type": "name"}]} +{"seq_id": "48535343", "text": "import json\nimport operator\n\nclass Team:\n\n def __init__(self, name, distance):\n self.name = name\n self.distance = distance\n\nwith open('Prem_stadiums.json') as json_file:\n teams = json.load(json_file)\n\nall_teams = []\n\nfor team in teams['stadiums']:\n all_teams.append(Team(team['team'], 0))\n\n\nwith open('distances.json') as json_file:\n data = json.load(json_file)\n\ndistance = 0\n\nfor game in data:\n game1 = json.loads(game)\n teamA = game1['teamA']\n teamB = game1['teamB']\n\n distance = game1['distance']['rows'][0]['elements'][0]['distance']['value']\n\n for team in all_teams:\n if team.name == teamA:\n team.distance += distance\n elif team.name == teamB:\n team.distance += distance\n\nprint(all_teams)\n\nall_teams.sort(key=lambda x: x.distance, reverse=True)\n\n\nfor team in all_teams:\n print('Team: ' + str(team.name) + ', Distance: ' + str(team.distance) + 'km')", "sub_path": "AwayDays.py", "file_name": "AwayDays.py", "file_ext": "py", "file_size_in_byte": 933, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "json.load", "line_number": 11, "usage_type": "call"}, {"api_name": "json.load", "line_number": 20, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "176427371", "text": "# -*- coding: utf-8 -*-\n\n\nfrom lms.djangoapps.courseware.courses import get_course_with_access\nfrom django.template.loader import render_to_string\nfrom django.shortcuts import render_to_response\nfrom web_fragments.fragment import Fragment\n\nfrom openedx.core.djangoapps.plugin_api.views import EdxFragmentView\n\nfrom opaque_keys.edx.keys import CourseKey\n\nfrom django.contrib.auth.models import User\nfrom lms.djangoapps.grades.course_grade_factory import CourseGradeFactory\nfrom lms.djangoapps.grades.models import PersistentCourseGrade\nfrom django.db.models import Avg, Max, Min, Sum\n\nfrom lms.djangoapps.courseware.access import has_access\nfrom lms.djangoapps.courseware.masquerade import setup_masquerade\nfrom django.db.models import prefetch_related_objects\nfrom openedx.features.course_duration_limits.access import generate_course_expired_fragment\n\nfrom django.db import transaction\nfrom .models import EolFeedback, SectionVisibility\n\nfrom django.http import Http404, HttpResponse\nfrom django.core.cache import cache\n\nfrom django.urls 
import reverse\n\n\ndef _get_context(request, course_id):\n \"\"\"\n Return all course/student/user data\n \"\"\"\n course_key = CourseKey.from_string(course_id)\n course = get_course_with_access(request.user, \"load\", course_key)\n\n # Get general info of course\n grade_cutoff, avg_grade, min_grade, max_grade = _get_course_info(course, course_key)\n\n # masquerade and student required for preview_menu (admin)\n staff_access = bool(has_access(request.user, 'staff', course))\n masquerade, student = setup_masquerade(request, course_key, staff_access, reset_masquerade_data=True)\n prefetch_related_objects([student], 'groups')\n if request.user.id != student.id:\n # refetch the course as the assumed student\n course = get_course_with_access(student, 'load', course_key, check_if_enrolled=True)\n course_grade = CourseGradeFactory().read(student, course) # Student grades\n courseware_summary = list(course_grade.chapter_grades.values())\n course_expiration_fragment = generate_course_expired_fragment(student, course) \n \n context = {\n \"course\": course,\n \"avg_grade\": avg_grade,\n \"min_grade\": min_grade,\n \"max_grade\": max_grade,\n \"grade_cutoff\": grade_cutoff,\n \"supports_preview_menu\": True,\n \"staff_access\": staff_access,\n \"masquerade\": masquerade,\n \"can_masquerade\": staff_access,\n \"student\": student,\n \"courseware_summary\": courseware_summary,\n \"grade_summary\": course_grade.summary,\n \"course_expiration_fragment\": course_expiration_fragment,\n \"grade_percent_scaled\": grade_percent_scaled,\n \"get_section_visibility\": get_section_visibility,\n \"get_feedback\": get_feedback,\n \"update_url\": reverse('feedback_post_update'),\n \"set_visibility_url\": reverse('feedback_post_set_visibility'),\n }\n return context\n\n\ndef _get_course_info(course, course_key):\n \"\"\"\n Calculate grade cutoff, average grade, min grade and max grade of all students enrolled in the course\n \"\"\"\n data = cache.get(\"eol_feedback-\" + course_key._to_string() + \"-course_info\") # cache\n if data is None:\n # Get active students on the course\n enrolled_students = User.objects.filter(\n courseenrollment__course_id=course_key,\n courseenrollment__is_active=1\n )\n\n # Get grade summary\n course_info = PersistentCourseGrade.objects.filter(\n user_id__in = enrolled_students,\n course_id = course_key\n ).aggregate(avg_percent = Avg('percent_grade'), min_percent = Min('percent_grade'), max_percent = Max('percent_grade'))\n avg_grade_percent = course_info.get('avg_percent', 0.)\n min_grade_percent = course_info.get('min_percent', 1.)\n max_grade_percent = course_info.get('max_percent', 0.)\n\n grade_cutoff = min(course.grade_cutoffs.values()) # Get the min value\n\n # Convert grade format\n avg_grade = grade_percent_scaled(avg_grade_percent, grade_cutoff) if avg_grade_percent is not None else 1.\n min_grade = grade_percent_scaled(min_grade_percent, grade_cutoff) if min_grade_percent is not None else 1.\n max_grade = grade_percent_scaled(max_grade_percent, grade_cutoff) if max_grade_percent is not None else 1.\n\n # cache\n data = [grade_cutoff, avg_grade, min_grade, max_grade]\n cache.set(\"eol_feedback-\" + course_key._to_string() + \"-course_info\", data, 60 * 5)\n\n return data[0], data[1], data[2], data[3] # grade_cutoff, avg_grade, min_grade, max_grade\n\n\ndef grade_percent_scaled(grade_percent, grade_cutoff):\n \"\"\"\n Scale grade percent by grade cutoff. Grade between 1.0 - 7.0\n \"\"\"\n if grade_percent == 0.:\n return 1.\n if grade_percent < grade_cutoff:\n return round(10. 
* (3. / grade_cutoff * grade_percent + 1.)) / 10.\n return round((3. / (1. - grade_cutoff) * grade_percent + (7. - (3. / (1. - grade_cutoff)))) * 10.) / 10.\n\n\ndef get_section_visibility(section_id, course_id):\n \"\"\"\n Return True/False depending on whether the section is visible.\n \"\"\"\n try:\n visibility = SectionVisibility.objects.get(section_id=section_id, course_id=course_id)\n return visibility.is_visible\n except SectionVisibility.DoesNotExist:\n return False\n\n\ndef get_feedback(block_id):\n \"\"\"\n Return feedback text if it exists\n \"\"\"\n try:\n feedback = EolFeedback.objects.get(block_id=block_id)\n return feedback.block_feedback\n except EolFeedback.DoesNotExist:\n return ''\n\n\nclass EolFeedbackFragmentView(EdxFragmentView):\n def render_to_fragment(self, request, course_id, **kwargs):\n if not self.has_page_access(request.user, course_id):\n raise Http404()\n context = _get_context(request, course_id)\n html = render_to_string('eol_feedback/eol_feedback_fragment.html', context)\n fragment = Fragment(html)\n return fragment\n \n\n def has_page_access(self, user, course_id):\n course_key = CourseKey.from_string(course_id)\n return User.objects.filter(\n courseenrollment__course_id=course_key,\n courseenrollment__is_active=1,\n pk = user.id\n ).exists()\n\n\ndef update_feedback(request):\n \"\"\"\n Update or create feedback of block_id by POST Method. Request must have block_id, block_feedback and course_id params.\n \"\"\"\n # check method and params\n if request.method != \"POST\":\n return HttpResponse(status=400)\n if 'block_id' not in request.POST or 'block_feedback' not in request.POST or 'course_id' not in request.POST:\n return HttpResponse(status=400)\n\n # check for access\n course_id = request.POST['course_id']\n course_key = CourseKey.from_string(course_id)\n course = get_course_with_access(request.user, \"load\", course_key)\n staff_access = bool(has_access(request.user, 'staff', course))\n if not staff_access:\n return HttpResponse(status=401)\n\n # get (and update) or create feedback\n block_id = request.POST['block_id']\n block_feedback = request.POST['block_feedback']\n try:\n feedback = EolFeedback.objects.get(block_id=block_id)\n feedback.block_feedback = block_feedback.strip()\n feedback.save()\n return HttpResponse(status=200)\n except EolFeedback.DoesNotExist:\n feedback = EolFeedback.objects.create(\n block_id=block_id,\n block_feedback=block_feedback.strip()\n )\n return HttpResponse(status=201)\n\n\ndef set_visibility(request):\n \"\"\"\n Update or create visibility of section_id by POST Method. 
Request must have section_id and course_id\n \"\"\"\n # check method and params\n if request.method != \"POST\":\n return HttpResponse(status=400)\n if 'section_id' not in request.POST or 'course_id' not in request.POST:\n return HttpResponse(status=400)\n\n # check for access\n course_id = request.POST['course_id']\n course_key = CourseKey.from_string(course_id)\n course = get_course_with_access(request.user, \"load\", course_key)\n staff_access = bool(has_access(request.user, 'staff', course))\n if not staff_access:\n return HttpResponse(status=401)\n\n # change or create visibility\n section_id = request.POST['section_id']\n try:\n visibility = SectionVisibility.objects.get(section_id=section_id, course_id=course_id)\n visibility.is_visible = not visibility.is_visible # change bool\n visibility.save()\n return HttpResponse(status=200)\n except SectionVisibility.DoesNotExist:\n visibility = SectionVisibility.objects.create(\n section_id=section_id,\n course_id=course_id,\n is_visible=True\n )\n return HttpResponse(status=201)\n", "sub_path": "eol_feedback/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8836, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "opaque_keys.edx.keys.CourseKey.from_string", "line_number": 36, "usage_type": "call"}, {"api_name": "opaque_keys.edx.keys.CourseKey", "line_number": 36, "usage_type": "name"}, {"api_name": "lms.djangoapps.courseware.courses.get_course_with_access", "line_number": 37, "usage_type": "call"}, {"api_name": "lms.djangoapps.courseware.access.has_access", "line_number": 43, "usage_type": "call"}, {"api_name": "lms.djangoapps.courseware.masquerade.setup_masquerade", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models.prefetch_related_objects", "line_number": 45, "usage_type": "call"}, {"api_name": "lms.djangoapps.courseware.courses.get_course_with_access", "line_number": 48, "usage_type": "call"}, {"api_name": "lms.djangoapps.grades.course_grade_factory.CourseGradeFactory", "line_number": 49, "usage_type": "call"}, {"api_name": "openedx.features.course_duration_limits.access.generate_course_expired_fragment", "line_number": 51, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 70, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 71, "usage_type": "call"}, {"api_name": "django.core.cache.cache.get", "line_number": 80, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 80, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 83, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 83, "usage_type": "name"}, {"api_name": "lms.djangoapps.grades.models.PersistentCourseGrade.objects.filter", "line_number": 89, "usage_type": "call"}, {"api_name": "lms.djangoapps.grades.models.PersistentCourseGrade.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "lms.djangoapps.grades.models.PersistentCourseGrade", "line_number": 89, "usage_type": "name"}, {"api_name": "django.db.models.Avg", "line_number": 92, "usage_type": "call"}, {"api_name": "django.db.models.Min", "line_number": 92, "usage_type": "call"}, {"api_name": "django.db.models.Max", "line_number": 92, "usage_type": "call"}, {"api_name": "django.core.cache.cache.set", "line_number": 106, "usage_type": 
"call"}, {"api_name": "django.core.cache.cache", "line_number": 106, "usage_type": "name"}, {"api_name": "models.SectionVisibility.objects.get", "line_number": 127, "usage_type": "call"}, {"api_name": "models.SectionVisibility.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "models.SectionVisibility", "line_number": 127, "usage_type": "name"}, {"api_name": "models.SectionVisibility.DoesNotExist", "line_number": 129, "usage_type": "attribute"}, {"api_name": "models.SectionVisibility", "line_number": 129, "usage_type": "name"}, {"api_name": "models.EolFeedback.objects.get", "line_number": 138, "usage_type": "call"}, {"api_name": "models.EolFeedback.objects", "line_number": 138, "usage_type": "attribute"}, {"api_name": "models.EolFeedback", "line_number": 138, "usage_type": "name"}, {"api_name": "models.EolFeedback.DoesNotExist", "line_number": 140, "usage_type": "attribute"}, {"api_name": "models.EolFeedback", "line_number": 140, "usage_type": "name"}, {"api_name": "openedx.core.djangoapps.plugin_api.views.EdxFragmentView", "line_number": 144, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 147, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 149, "usage_type": "call"}, {"api_name": "web_fragments.fragment.Fragment", "line_number": 150, "usage_type": "call"}, {"api_name": "opaque_keys.edx.keys.CourseKey.from_string", "line_number": 155, "usage_type": "call"}, {"api_name": "opaque_keys.edx.keys.CourseKey", "line_number": 155, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 156, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 156, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 169, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 171, "usage_type": "call"}, {"api_name": "opaque_keys.edx.keys.CourseKey.from_string", "line_number": 175, "usage_type": "call"}, {"api_name": "opaque_keys.edx.keys.CourseKey", "line_number": 175, "usage_type": "name"}, {"api_name": "lms.djangoapps.courseware.courses.get_course_with_access", "line_number": 176, "usage_type": "call"}, {"api_name": "lms.djangoapps.courseware.access.has_access", "line_number": 177, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 179, "usage_type": "call"}, {"api_name": "models.EolFeedback.objects.get", "line_number": 185, "usage_type": "call"}, {"api_name": "models.EolFeedback.objects", "line_number": 185, "usage_type": "attribute"}, {"api_name": "models.EolFeedback", "line_number": 185, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 188, "usage_type": "call"}, {"api_name": "models.EolFeedback.DoesNotExist", "line_number": 189, "usage_type": "attribute"}, {"api_name": "models.EolFeedback", "line_number": 189, "usage_type": "name"}, {"api_name": "models.EolFeedback.objects.create", "line_number": 190, "usage_type": "call"}, {"api_name": "models.EolFeedback.objects", "line_number": 190, "usage_type": "attribute"}, {"api_name": "models.EolFeedback", "line_number": 190, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 194, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 203, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 205, "usage_type": 
"call"}, {"api_name": "opaque_keys.edx.keys.CourseKey.from_string", "line_number": 209, "usage_type": "call"}, {"api_name": "opaque_keys.edx.keys.CourseKey", "line_number": 209, "usage_type": "name"}, {"api_name": "lms.djangoapps.courseware.courses.get_course_with_access", "line_number": 210, "usage_type": "call"}, {"api_name": "lms.djangoapps.courseware.access.has_access", "line_number": 211, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 213, "usage_type": "call"}, {"api_name": "models.SectionVisibility.objects.get", "line_number": 218, "usage_type": "call"}, {"api_name": "models.SectionVisibility.objects", "line_number": 218, "usage_type": "attribute"}, {"api_name": "models.SectionVisibility", "line_number": 218, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 221, "usage_type": "call"}, {"api_name": "models.SectionVisibility.DoesNotExist", "line_number": 222, "usage_type": "attribute"}, {"api_name": "models.SectionVisibility", "line_number": 222, "usage_type": "name"}, {"api_name": "models.SectionVisibility.objects.create", "line_number": 223, "usage_type": "call"}, {"api_name": "models.SectionVisibility.objects", "line_number": 223, "usage_type": "attribute"}, {"api_name": "models.SectionVisibility", "line_number": 223, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 228, "usage_type": "call"}]} +{"seq_id": "56990884", "text": "import numpy as np\nfrom scipy.signal import hilbert, tukey\n\n\ndef fast_hilbert(array):\n n_points = array.shape[0]\n n_fft = next_power2(n_points)\n return hilbert(array, n_fft)[:n_points]\n\n\ndef next_power2(num):\n \"\"\"Compute the smallest power of 2 >= to num.(float -> int)\"\"\"\n return 2 ** int(np.ceil(np.log2(num)))\n\n\ndef split_signal(X, n_splits=1, apply_window=True):\n \"\"\"Split the signal in n_splits chunks for faster training.\n\n Parameters\n ----------\n X : ndarray, shape (n_channels, n_times)\n Signal to split. It should be only one signal.\n n_splits : int (default: 1)\n Number of splits\n apply_window : bool (default: True)\n If set to True, a tukey window is applied to each split to\n reduce the border artifacts.\n\n Return\n ------\n X_split: ndarray, shape (n_splits, n_channels, n_times // n_splits)\n The signal splitted in n_splits.\n \"\"\"\n assert X.ndim == 2, (\n \"This splitting utility is only designed for one multivariate \"\n \"signal (n_channels, n_times). 
Found X.ndim={}.\".format(X.ndim))\n\n n_splits = int(n_splits)\n assert n_splits > 0, \"The number of splits should be larger than 0.\"\n\n n_channels, n_times = X.shape\n n_times = n_times // n_splits\n X_split = X[:, :n_splits * n_times]\n X_split = X_split.reshape(n_channels, n_splits, n_times).swapaxes(0, 1)\n\n # Apply a window to the signal to reduce the border artifacts\n if apply_window:\n X_split *= tukey(n_times, alpha=0.1)[None, None, :]\n\n return X_split\n", "sub_path": "alphacsc/utils/signal.py", "file_name": "signal.py", "file_ext": "py", "file_size_in_byte": 1548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "scipy.signal.hilbert", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 13, "usage_type": "call"}, {"api_name": "scipy.signal.tukey", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "476053218", "text": "import enum\nimport logging\nimport math\nfrom typing import Iterator, Optional\n\nimport determined as det\nfrom determined.common.experimental.session import Session\n\nlogger = logging.getLogger(\"determined.core\")\n\n\nclass Unit(enum.Enum):\n EPOCHS = \"UNIT_EPOCHS\"\n RECORDS = \"UNIT_RECORDS\"\n BATCHES = \"UNIT_BATCHES\"\n\n\nclass SearcherOp:\n def __init__(\n self,\n session: Session,\n trial_id: int,\n unit: Unit,\n length: int,\n ) -> None:\n self._session = session\n self._trial_id = trial_id\n self._unit = unit\n self._length = length\n self._completed = False\n\n @property\n def unit(self) -> Unit:\n return self._unit\n\n @property\n def length(self) -> int:\n return self._length\n\n @property\n def records(self) -> int:\n if self._unit != Unit.RECORDS:\n raise RuntimeError(\n \"you can only read op.records if you configured your searcher in terms of records\"\n )\n return self._length\n\n @property\n def batches(self) -> int:\n if self._unit != Unit.BATCHES:\n raise RuntimeError(\n \"you can only read op.batches if you configured your searcher in terms of batches\"\n )\n return self._length\n\n @property\n def epochs(self) -> int:\n if self._unit != Unit.EPOCHS:\n raise RuntimeError(\n \"you can only read op.epochs if you configured your searcher in terms of epochs\"\n )\n return self._length\n\n def report_progress(self, length: float) -> None:\n if self._completed and length != self._length:\n raise RuntimeError(\"you must not call op.report_progress() after op.complete()\")\n logger.debug(f\"op.report_progress({length})\")\n self._session.post(\n f\"/api/v1/trials/{self._trial_id}/progress\",\n data=det.util.json_encode(length),\n )\n\n def complete(self, searcher_metric: float) -> None:\n if self._completed:\n raise RuntimeError(\"you may only call op.complete() once\")\n if math.isnan(searcher_metric):\n raise RuntimeError(\"searcher_metric may not be NaN\")\n self._completed = True\n body = {\n \"op\": {\n \"length\": {\n \"length\": self._length,\n \"unit\": self._unit.value,\n }\n },\n \"searcherMetric\": searcher_metric,\n }\n logger.debug(f\"op.complete({searcher_metric})\")\n self._session.post(\n f\"/api/v1/trials/{self._trial_id}/searcher/completed_operation\",\n data=det.util.json_encode(body),\n )\n\n\nclass Searcher:\n \"\"\"\n Searcher gives direct access to operations emitted by the search algorithm in the master. 
Each\n SearcherOp emitted has a (unitless) length that you should train for, then you complete the op\n by reporting the validation metric you are searching over.\n\n It is the user's responsibility to execute the required training. Since the user configured the\n length of the searcher in the experiment configuration, the user should know if the unitless\n length represents epochs, batches, records, etc.\n\n It is also the user's responsibility to evaluate the model after training and report the correct\n metric; if you intend to search over a metric called val_accuracy, you should report\n val_accuracy.\n\n Lastly, it is recommended (not required) to report progress periodically, so that the webui can\n accurately reflect current progress. Progress is another unitless length.\n\n Example:\n\n .. code:: python\n\n # We'll pretend we configured the searcher\n # in terms of batches, so we will interpret op.length as a\n # count of batches.\n # Note that you'll have to load your starting point from a\n # checkpoint if you want to support pausing/continuing training.\n batches_trained = 0\n\n for op in generic_context.searcher.ops():\n # Train for however long the op requires you to.\n # Note that op.length is an absolute length, not an\n # incremental length:\n while batches_trained < op.length:\n my_train_batch()\n\n batches_trained += 1\n\n # Reporting progress every batch would be expensive:\n if batches_trained % 1000 == 0:\n op.report_progress(batches_trained)\n\n # After training the required amount, pass your searcher\n # metric to op.complete():\n val_metrics = my_validate()\n op.complete(val_metrics[\"my_searcher_metric\"])\n\n Note that reporting metrics is completely independent of the Searcher API, via\n ``core_context.training.report_training_metrics()`` or\n ``core_context.training.report_validation_metrics()``.\n \"\"\"\n\n def __init__(self, session: Session, trial_id: int, run_id: int, allocation_id: str) -> None:\n self._session = session\n self._trial_id = trial_id\n self._run_id = run_id\n self._allocation_id = allocation_id\n\n def _get_searcher_op(self) -> Optional[SearcherOp]:\n logger.debug(\"_get_searcher_op()\")\n r = self._session.get(f\"/api/v1/trials/{self._trial_id}/searcher/operation\")\n body = r.json()\n if body[\"completed\"]:\n return None\n\n length = body[\"op\"][\"validateAfter\"][\"length\"]\n return SearcherOp(\n self._session, self._trial_id, unit=Unit(length[\"unit\"]), length=length[\"length\"]\n )\n\n def ops(self, auto_ack: bool = True) -> Iterator[SearcherOp]:\n \"\"\"\n Iterate through all the ops this searcher has to offer.\n\n The caller must call op.complete() on each operation.\n \"\"\"\n\n while True:\n op = self._get_searcher_op()\n if op is None:\n if auto_ack:\n self.acknowledge_out_of_ops()\n break\n yield op\n if not op._completed:\n raise RuntimeError(\"you must call op.complete() on each operation\")\n\n def acknowledge_out_of_ops(self) -> None:\n \"\"\"\n acknowledge_out_of_ops() tells the Determined master that you are shutting down because\n you have recognized the searcher has no more operations for you to complete at this time.\n\n This is important for the Determined master to know that it is safe to restart this process\n should new operations be assigned to this trial.\n\n acknowledge_out_of_ops() is normally called automatically just before ops() raises a\n StopIteration, unless ops() is called with auto_ack=False.\n \"\"\"\n logger.debug(f\"acknowledge_out_of_ops(allocation_id:{self._allocation_id})\")\n 
self._session.post(f\"/api/v1/allocations/{self._allocation_id}/signals/ack_preemption\")\n\n\nclass DummySearcherOp(SearcherOp):\n def __init__(self, unit: Unit, length: int) -> None:\n self._unit = unit\n self._length = length\n self._completed = False\n\n def report_progress(self, length: float) -> None:\n if self._completed and length != self._length:\n raise RuntimeError(\"you must not call op.report_progress() after op.complete()\")\n logger.info(\"progress report: {length}/{self._length}\")\n\n def complete(self, searcher_metric: float) -> None:\n if self._completed:\n raise RuntimeError(\"you may only call op.complete() once\")\n if math.isnan(searcher_metric):\n raise RuntimeError(\"searcher_metric may not be NaN\")\n self._completed = True\n logger.info(f\"SearcherOp Complete: searcher_metric={det.util.json_encode(searcher_metric)}\")\n\n\nclass DummySearcher(Searcher):\n \"\"\"Yield a singe search op. We need a way for this to be configurable.\"\"\"\n\n def __init__(self, unit: Unit = Unit.EPOCHS, length: int = 1) -> None:\n self._unit = unit\n self._length = length\n\n def ops(self, auto_ack: bool = True) -> Iterator[SearcherOp]:\n op = DummySearcherOp(self._unit, self._length)\n yield op\n if not op._completed:\n raise RuntimeError(\"you must call op.complete() on each operation\")\n\n def acknowledge_out_of_ops(self) -> None:\n pass\n", "sub_path": "harness/determined/_core/_searcher.py", "file_name": "_searcher.py", "file_ext": "py", "file_size_in_byte": 8282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 12, "usage_type": "attribute"}, {"api_name": "determined.common.experimental.session.Session", "line_number": 21, "usage_type": "name"}, {"api_name": "determined.util.json_encode", "line_number": 70, "usage_type": "call"}, {"api_name": "determined.util", "line_number": 70, "usage_type": "attribute"}, {"api_name": "math.isnan", "line_number": 76, "usage_type": "call"}, {"api_name": "determined.util.json_encode", "line_number": 91, "usage_type": "call"}, {"api_name": "determined.util", "line_number": 91, "usage_type": "attribute"}, {"api_name": "determined.common.experimental.session.Session", "line_number": 146, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 152, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 164, "usage_type": "name"}, {"api_name": "math.isnan", "line_number": 210, "usage_type": "call"}, {"api_name": "determined.util.json_encode", "line_number": 213, "usage_type": "call"}, {"api_name": "determined.util", "line_number": 213, "usage_type": "attribute"}, {"api_name": "typing.Iterator", "line_number": 223, "usage_type": "name"}]} +{"seq_id": "502045571", "text": "from itertools import product\nfrom typing import Any, Dict, Iterator, List, Optional, Tuple, Type, TypeVar\nfrom urllib import parse\n\nfrom ..defs.fetcher import Discovery\nfrom ..utils import format_object\n\n\nT = TypeVar('T', bound='Direction')\nclass Direction:\n def __init__(self, *, link='', link_arg='', state: Dict[str, str] = None) -> None:\n self.link = link\n self.link_arg = link_arg\n self.state = state or {}\n\n def encode(self) -> str:\n link_part = parse.urlencode({self.link: self.link_arg} if self.link else {})\n state_part = parse.urlencode(sorted(self.state.items()), doseq=True)\n return '{}?{}'.format(link_part, state_part)\n\n @classmethod\n def 
encode_list(cls: Type[T], directions: List[T]) -> str:\n return '/'.join(dirn.encode() for dirn in directions)\n\n @classmethod\n def decode(cls: Type[T], payload: str) -> T:\n link_part, state = [dict(parse.parse_qsl(part, keep_blank_values=True))\n for part in payload.split('?')]\n if link_part:\n [(link, link_arg)] = link_part.items()\n else:\n link = link_arg = ''\n\n return cls(link=link, link_arg=link_arg, state=state)\n\n @classmethod\n def decode_list(cls: Type[T], payload: str) -> List[T]:\n return [cls.decode(seg) for seg in payload.split('/')] if payload else [cls()]\n\n def __repr__(self) -> str:\n return format_object(self, ['link', 'link_arg', 'state'])\n\n def link_eq(self: T, other: Any) -> bool:\n if type(other) != type(self):\n return False\n return self.link == other.link and self.link_arg == other.link_arg\n\n def __eq__(self: T, other: Any) -> bool:\n if type(other) != type(self):\n return False\n return self.link_eq(other) and self.state == other.state\n\n def __hash__(self) -> int:\n return hash(self.encode())\n\n\nclass CusisDiscovery(Discovery):\n def __init__(self, *, directions, model=None, states: Dict[str, List[str]] = None,\n links: List[Tuple[str, str]] = None,\n exclude: Dict[str, str] = None, page=None) -> None:\n self.directions = directions\n self.path = Direction.encode_list(directions)\n self.model = model\n self.states = states or {}\n self.exclude = exclude\n self.links = links or []\n self.page = page\n self.sampled = None # type: Optional[List[str]]\n\n def edges(self) -> Iterator[str]:\n state_keys = list(self.states.keys())\n prefix, sep, postfix = self.path.rpartition('/')\n last_dirn = Direction.decode(postfix)\n\n if self.states:\n for state_values in product(*self.states.values()):\n for key, value in zip(state_keys, state_values):\n last_dirn.state[key] = value\n if last_dirn.state != self.exclude:\n yield prefix + sep + last_dirn.encode()\n\n new_dirn = Direction()\n\n for link, link_arg in self.links:\n new_dirn.link = link\n new_dirn.link_arg = link_arg\n yield self.path + '/' + new_dirn.encode()\n\n def edge_len(self) -> int:\n \"\"\"Returns the number of out-going edges.\"\"\"\n if self.states:\n states = 1\n for state_list in self.states.values():\n states *= len(state_list)\n states -= bool(self.exclude)*1\n else:\n states = 0\n\n return states + len(self.links)\n\n def __repr__(self) -> str:\n return format_object(self, ['path', 'states', 'links'])\n", "sub_path": "scraper/scraper/cusis/fetcher_defs.py", "file_name": "fetcher_defs.py", "file_ext": "py", "file_size_in_byte": 3597, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "typing.TypeVar", "line_number": 9, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 11, "usage_type": "name"}, {"api_name": "urllib.parse.urlencode", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 17, "usage_type": "name"}, {"api_name": "urllib.parse.urlencode", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 26, "usage_type": "name"}, {"api_name": "urllib.parse.parse_qsl", "line_number": 27, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 37, 
"usage_type": "name"}, {"api_name": "typing.List", "line_number": 37, "usage_type": "name"}, {"api_name": "utils.format_object", "line_number": 41, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 48, "usage_type": "name"}, {"api_name": "defs.fetcher.Discovery", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 60, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 76, "usage_type": "call"}, {"api_name": "typing.Iterator", "line_number": 70, "usage_type": "name"}, {"api_name": "utils.format_object", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "383676684", "text": "from pathlib import Path\n\nimport os\n\nfrom modules.exceptions import AbsentUserConfigException, NotDefinedConfigValue\n\n\nclass UserConfig:\n API_KEY_CONFIG_NAME = 'key'\n INPUT_FILE_CONFIG_NAME = 'input_file'\n\n def __init__(self):\n self.key, self.input_file = self.read_user_config()\n if not self.key:\n raise NotDefinedConfigValue('Api key is not defined')\n if not self.input_file:\n raise NotDefinedConfigValue('Input file is not defined')\n\n def read_user_config(self):\n api_key, input_file = None, None\n user_config = self.get_user_config()\n if not os.path.exists(user_config):\n raise AbsentUserConfigException('Config at ~/.az is not found')\n with open(user_config) as config:\n for line in config:\n key, value = line.split('=')\n if key == self.API_KEY_CONFIG_NAME:\n api_key = value.strip()\n elif key == self.INPUT_FILE_CONFIG_NAME:\n input_file = value.strip()\n return api_key, input_file\n\n @staticmethod\n def get_user_config():\n if os.path.exists('.az'):\n return '.az'\n home_dir = Path.home()\n return os.path.join(home_dir, '.az')\n", "sub_path": "modules/cli/user_config.py", "file_name": "user_config.py", "file_ext": "py", "file_size_in_byte": 1266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "modules.exceptions.NotDefinedConfigValue", "line_number": 15, "usage_type": "call"}, {"api_name": "modules.exceptions.NotDefinedConfigValue", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "modules.exceptions.AbsentUserConfigException", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pathlib.Path.home", "line_number": 37, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}]} +{"seq_id": "504381502", "text": "\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.decomposition import PCA\n\nfrom cupcake.smush.base import SmushPlotterBase\n\n\n\nclass PCAPlotter(SmushPlotterBase):\n\n axis_label = 'Principal Component {:d}'\n\n def __init__(self, data, n_components, color, hue, hue_order,\n palette, saturation, marker, 
marker_order, text, text_order,\n linewidth, linewidth_order, edgecolor,\n edgecolor_order, **pca_kws):\n \"\"\"Initialize the variables and data for plotting PCA\n\n Parameters\n ----------\n {input_params}\n\n \"\"\"\n\n pca_kws = {} if pca_kws is None else pca_kws\n pca_kws.setdefault('n_components', n_components)\n\n self.establish_variables(data)\n self.establish_colors(color, hue, hue_order, palette, saturation)\n self.establish_symbols(marker, marker_order, text, text_order,\n linewidth, linewidth_order, edgecolor,\n edgecolor_order)\n\n self.establish_reducer(PCA, n_components, pca_kws)\n self.compute_reduction()\n\n def plot(self, ax=None, legend=True, legend_kws=None, title='', **kwargs):\n if ax is None:\n ax = plt.gca()\n\n if self.groupby is not None:\n grouped = self.plot_data.groupby(self.groupby)\n colors = sns.color_palette(self.palette, n_colors=len(grouped.groups))\n# with sns.color_palette(palette, n_colors=len(grouped.groups)):\n for color, (group, df) in zip(colors, self.plot_data.groupby(self.groupby)):\n # marker = group_to_marker[group]\n# color =\n ax.plot(df.iloc[:, 0], df.iloc[:, 1], 'o',\n label=group, color=color, **kwargs)\n else:\n ax.plot(self.decomposed.iloc[:, 0], self.decomposed.iloc[:, 1],\n 'o', **kwargs)\n\n legend_kws = {} if legend_kws is None else legend_kws\n legend_kws.setdefault('loc', 'best')\n if legend:\n ax.legend(**legend_kws)\n explained_variance = self.reducer.explained_variance_ratio_ * 100\n xmin, xmax, ymin, ymax = ax.axis()\n vmax = max(xmax, ymax)\n vmin = min(xmin, ymin)\n vlim = (vmin, vmax)\n ax.set(xlabel='PC1 explains {:.2f}% of variance'.format(explained_variance[0]),\n ylabel='PC2 explains {:.2f}% of variance'.format(explained_variance[1]),\n title=title, xlim=vlim, ylim=vlim)\n\n def draw_loadings(self, ax):\n pass\n\n\n# def pcaplot(data, hue_groupby=None, palette=None):\n\n", "sub_path": "cupcake/smush/decomposition.py", "file_name": "decomposition.py", "file_ext": "py", "file_size_in_byte": 2612, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "cupcake.smush.base.SmushPlotterBase", "line_number": 10, "usage_type": "name"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 35, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "seaborn.color_palette", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "411051582", "text": "import sqlalchemy as sa\nfrom sqlalchemy.ext.mutable import MutableDict\n\nfrom core.models.base_class import Base, TimestampMixin\n\n\nclass TrackCountLog(Base, TimestampMixin):\n # noinspection SpellCheckingInspection\n __tablename__ = \"TrackCountLog\"\n track_id = sa.Column(\n \"TrackID\", sa.String(32), unique=True, nullable=False, primary_key=True\n )\n data_source_count = sa.Column(\"DataSourceCount\", MutableDict.as_mutable(sa.JSON))\n percentage_count = sa.Column(\"PercentageCount\", MutableDict.as_mutable(sa.JSON))\n", "sub_path": "core/models/trackcountlog.py", "file_name": "trackcountlog.py", "file_ext": "py", "file_size_in_byte": 535, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "core.models.base_class.Base", "line_number": 7, "usage_type": "name"}, {"api_name": "core.models.base_class.TimestampMixin", "line_number": 7, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", 
"line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.mutable.MutableDict.as_mutable", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.mutable.MutableDict", "line_number": 13, "usage_type": "name"}, {"api_name": "sqlalchemy.JSON", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.mutable.MutableDict.as_mutable", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.mutable.MutableDict", "line_number": 14, "usage_type": "name"}, {"api_name": "sqlalchemy.JSON", "line_number": 14, "usage_type": "attribute"}]} +{"seq_id": "521171312", "text": "import pandas as pd\nfrom tkinter import Tk, END, LabelFrame, Text, Label, Frame, Button, LEFT\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom numpy import abs as np_abs\nfrom numpy.fft import rfft\nimport soundfile as sf\nimport numpy as np\n\nsc = StandardScaler()\nlen_spectr = 250\ndata = pd.read_csv('itog1.csv', sep = ';')\nX = data.iloc[:, 0:len_spectr].values\ny = data.iloc[:, 255].values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=50)\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\nregressor = RandomForestClassifier(n_estimators=200, random_state=50)\nregressor.fit(X_train, y_train)\ny_pred = regressor.predict(X_test)\n\ndef getText():\n s = text.get(1.0, END)\n s = s[0: len(s)-1]\n \n key = True\n len_part = 390000\n\n music_sig, samplerate = sf.read(s)\n\n for i_on_sig in range (0, int(len(music_sig)/len_part)):\n sig = music_sig[i_on_sig*len_part:(i_on_sig+1)*len_part]\n\n N = int(len(sig)/1024)*1024\n spectrum_array = [0]*512\n\n for i in range(0, N-1, 1024):\n time_array = [0]*1024\n for j in range(0, 1023):\n time_array[j] = sig[j+i][0]\n spectrum = rfft(time_array, 1024)\n spectrum_abc = np_abs(spectrum)\n for j in range(0, 511):\n spectrum_array[j] += spectrum_abc[j]\n\n for j in range(0, 511):\n spectrum_array[j] = spectrum_array[j]/int(len(sig)/1024)\n\n y = np.array(spectrum_array)/N\n\n if key == False:\n a += [y[0:len_spectr]]\n\n if key == True: \n a = [y[0:len_spectr]]\n key = False\n\n \n Test = a[0:len_spectr]\n \n Test = sc.transform(Test)\n\n \n y_pred_test = regressor.predict(Test)\n\n print(y_pred_test)\n\n arr = y_pred_test\n num = arr[0]\n N = len(arr)\n max_frq = 1\n\n for i in range(N-1):\n frq = 1\n for k in range(i+1,N):\n if arr[i] == arr[k]:\n frq += 1\n if frq > max_frq:\n max_frq = frq\n num = arr[i]\n if num == 0: label['text'] = 'Хипхоп'\n if num == 6: label['text'] = 'Электроника'\n if num == 2: label['text'] = 'Джаз'\n if num == 3: label['text'] = 'Рок'\n if num == 4: label['text'] = 'Классика'\n \ndef deleteText():\n text.delete(1.0, END)\n label['text'] = ''\n \nroot = Tk()\nroot.title(\"Определитель жанра 1.3\")\nf_top = LabelFrame (root)\nf_top = LabelFrame(text=\"Путь к файлу(Желательно использование форматов wav или flac):\")\nf_top.pack(padx = 10, pady = 10)\ntext = Text(f_top, width=50, height=1)\ntext.pack()\n\nlabel = Label(f_top)\nlabel.pack() \n\nframe = Frame()\nframe.pack()\n \nb_get = Button(frame, text=\"Определить жанр\", command=getText)\nb_get.pack(side=LEFT)\n \nb_delete = Button(frame, text=\"Удалить\", 
command=deleteText)\nb_delete.pack(side=LEFT)\n\nroot.mainloop()\n", "sub_path": "DOTGOM/programm.py", "file_name": "programm.py", "file_ext": "py", "file_size_in_byte": 3081, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 19, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 24, "usage_type": "argument"}, {"api_name": "soundfile.read", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.fft.rfft", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 89, "usage_type": "argument"}, {"api_name": "tkinter.Tk", "line_number": 92, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 94, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 95, "usage_type": "call"}, {"api_name": "tkinter.Text", "line_number": 97, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 100, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 103, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 106, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 107, "usage_type": "name"}, {"api_name": "tkinter.Button", "line_number": 109, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 110, "usage_type": "name"}]} +{"seq_id": "204465801", "text": "# coding=utf-8\nfrom os import environ\nfrom datetime import datetime\n\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport unittest\nimport router\n\nimport constants\nfrom pages.mainpage import MainPage\nfrom pages.video_list import VideoListPage\nfrom pages.video import VideoPage\nfrom pages.wall import WallPost, VideoSelector\nfrom utils import print_test_info\n\nSCREENSHOT_PATH = constants.SCREENSHOT_PATH + '/video/'\n\n\nclass VideoTest(unittest.TestCase):\n LOGIN = environ['LOGIN'] # type: str\n PASSWORD = environ['PASSWORD'] # type: str\n driver = None # type: webdriver.Remote\n TEST_VIDEO_ID = 657791912610\n TEST_EXTERNAL_VIDEO_LINK = 'https://www.youtube.com/watch?v=OPf0YbXqDm0'\n\n @classmethod\n def setUpClass(cls):\n browser = environ.get('BROWSER', 'CHROME')\n\n cls.driver = webdriver.Remote(\n command_executor=constants.COMMAND_EXECUTOR,\n desired_capabilities=getattr(DesiredCapabilities, browser).copy()\n )\n\n router.Router(driver=cls.driver)\n\n def setUp(self):\n self.driver.refresh()\n main_page = MainPage(self.driver)\n main_page.open()\n main_page.authentificate(self.LOGIN, self.PASSWORD)\n if constants.MAKE_SCREENSHOTS:\n self.driver.save_screenshot(\n SCREENSHOT_PATH + 'login/{time}.png'.format(time=datetime.now().time().isoformat()))\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_going_to_videopage(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n video_page = VideoListPage(self.driver)\n\n video_page.wait_for_load()\n\n self.assertEqual(self.driver.current_url, constants.BASE_URL + 
VideoListPage.PATH + 'top')\n\n if constants.MAKE_SCREENSHOTS:\n self.driver.save_screenshot(\n SCREENSHOT_PATH + 'videopage/{time}.png'.format(time=datetime.now().time().isoformat()))\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_post_video(self):\n main_page = MainPage(self.driver)\n main_page.open_note()\n\n wall_post = WallPost(self.driver)\n wall_post.open_video_select_dialog()\n\n video_page = VideoSelector(self.driver)\n video_page.select_first()\n\n wall_post.check_exist_video()\n wall_post.post()\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_post_multiple_videos(self):\n main_page = MainPage(self.driver)\n main_page.open_note()\n\n wall_post = WallPost(self.driver)\n\n expected_video_count = 2\n for i in range(expected_video_count):\n wall_post.open_video_select_dialog()\n video_selector = VideoSelector(self.driver)\n video_selector.select_first()\n\n added_video_count = wall_post.get_added_blocks_count()\n\n self.assertTrue(added_video_count == expected_video_count)\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_post_video_with_text(self):\n \"\"\"\n Description: A video can be added to a post via the dialog and published (with text)\n \"\"\"\n main_page = MainPage(self.driver)\n main_page.open_note()\n\n wall_post = WallPost(self.driver)\n wall_post.write_post(\"Post\")\n wall_post.open_video_select_dialog()\n\n video_page = VideoSelector(self.driver)\n video_page.select_first()\n wall_post.post()\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_post_video_with_text_smile(self):\n \"\"\"\n Description: A video can be added to a post via the dialog and published (with a smiley)\n \"\"\"\n main_page = MainPage(self.driver)\n main_page.open_note()\n\n wall_post = WallPost(self.driver)\n wall_post.write_post(\"Post\")\n wall_post.open_smile_list()\n wall_post.add_smile_totext()\n wall_post.close_smile_list()\n wall_post.open_video_select_dialog()\n\n video_page = VideoSelector(self.driver)\n video_page.select_first()\n wall_post.post()\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_post_video_with_smile(self):\n \"\"\"\n Description: A video can be added to a post via the dialog and published (with a smiley)\n \"\"\"\n main_page = MainPage(self.driver)\n main_page.open_note()\n\n wall_post = WallPost(self.driver)\n wall_post.open_smile_list()\n wall_post.add_smile_totext()\n wall_post.close_smile_list()\n wall_post.open_video_select_dialog()\n\n video_page = VideoSelector(self.driver)\n video_page.select_first()\n wall_post.post()\n\n @unittest.skip(\"WIP\")\n def test_outer_post_upload_video(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n video_list_page = VideoListPage(self.driver)\n video = video_list_page.wait_and_get_video_by_num(0)\n before_video_id = video.get_attribute('data-id')\n\n main_page.go_to_main()\n wall_post = WallPost(self.driver)\n attach_video_input = wall_post.get_attach_video_input()\n attach_video_input.send_keys('content/video.mp4')\n\n video_list_page = VideoListPage(self.driver)\n video = video_list_page.wait_and_get_video_by_num(0)\n after_video_id = video.get_attribute('data-id')\n\n self.assertFalse(before_video_id == after_video_id)\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_scrolling_loads_videos(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n video_page = VideoListPage(self.driver)\n\n videos_portion_count = video_page.video_count\n\n video_page.scroll_videos_to(1000000)\n\n # check that a 
video, that wasn't there, is now loaded\n video = video_page.wait_and_get_video_by_num(videos_portion_count + 1)\n\n self.assertTrue(video, 'Didn`t load videos on scroll')\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_external_upload_video_and_delete_video(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n video_list_page = VideoListPage(self.driver)\n\n router.Router().open_my_videos_by_url()\n\n video_count_initial = video_list_page.video_count\n\n video_upload_dialog = video_list_page.open_video_upload_dialog()\n\n video_upload_dialog.open_external_upload_dialog()\n\n video_upload_dialog.add_external_video(self.TEST_EXTERNAL_VIDEO_LINK)\n\n video_list_page.wait_until_popup_is_closed()\n\n video = video_list_page.wait_and_get_video_by_num(0)\n\n self.assertEqual(video_count_initial + 1, video_list_page.video_count, 'Video wasn`t added')\n video_id = video.get_attribute('data-id')\n\n video_list_page.delete_video(video)\n video_ids_after_delete = video_list_page.video_ids\n\n self.assertNotIn(video_id, video_ids_after_delete, 'Video was not removed')\n self.assertEqual(video_count_initial, video_list_page.video_count)\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_upload_video(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n video_page = VideoListPage(self.driver)\n\n router.Router().open_my_videos_by_url()\n video_count_initial = video_page.video_count\n upload_page = video_page.open_video_upload_dialog()\n\n upload_page.upload_file()\n video_page.wait_load()\n video_page.wait_noload()\n\n video_count_second = video_page.video_count\n video = video_page.wait_and_get_video_by_num(0)\n\n self.assertEqual(video_count_initial + 1, video_count_second)\n\n video_page.delete_video(video)\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_attach_video_message(self):\n main_page = MainPage(self.driver)\n\n message_page = main_page.go_to_message()\n message_page.open_first_dialog()\n\n selector = message_page.open_video_dialog()\n selector.select_first()\n\n initial_message_count = message_page.message_count()\n message_page.send()\n second_message_count = message_page.message_count()\n\n self.assertTrue(second_message_count > initial_message_count,\n \"{0} !> {1}\".format(second_message_count, initial_message_count))\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_comment_stream(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n video_page = VideoPage(self.driver)\n video_page_list = VideoListPage(self.driver)\n\n router.Router().open_live()\n video_page_list.wait_open_stream()\n\n video = video_page_list.wait_and_get_video_by_num(0)\n\n video_page.open_by_id(video.get_attribute('data-id'))\n video_page.watch_video()\n video_page.send_comment('Test{0}'.format(self.LOGIN))\n video_page.find_comment_with_text('Test{0}'.format(self.LOGIN))\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_open_video(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n video_page = VideoPage(self.driver)\n video_page_list = VideoListPage(self.driver)\n\n router.Router().open_new()\n video = video_page_list.wait_and_get_video_by_num(0)\n video_page.open_by_id(video.get_attribute('data-id'))\n video_page.watch_video()\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_subscription(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n video_page = VideoListPage(self.driver)\n 
router.Router().open_subscriptions()\n initial_count = video_page.count_subscribtions()\n\n video_page.search('Test')\n video_page.wait_open_search()\n video_page.subscribe()\n\n router.Router().open_subscriptions()\n second_count = video_page.count_subscribtions()\n\n self.assertEqual(initial_count + 1, second_count,\n \"Not equals: {0} + 1 != {1}\".format(initial_count, second_count))\n\n initial_count = second_count\n\n video_page.search('Test')\n video_page.wait_open_search()\n video_page.unsubscribe()\n\n router.Router().open_subscriptions()\n second_count = video_page.count_subscribtions()\n\n self.assertEqual(initial_count - 1, second_count,\n \"Not equals: {0} - 1 != {1}\".format(initial_count, second_count))\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_comment_video(self):\n video_page = VideoPage(self.driver)\n video_page.open_by_id(self.TEST_VIDEO_ID)\n\n video_page.send_comment('Test{0}'.format(self.LOGIN))\n element = video_page.find_comment_with_text('Test{0}'.format(self.LOGIN))\n video_page.remove_comment(element)\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_open_stream(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n video_page = VideoPage(self.driver)\n video_page_list = VideoListPage(self.driver)\n\n router.Router().open_live()\n video = video_page_list.wait_and_get_video_by_num(0)\n video_page.open_by_id(video.get_attribute('data-id'))\n video_page.watch_video()\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_search(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n video_page = VideoListPage(self.driver)\n video_page.search('Test')\n video_page.wait_open_search()\n video_id = int(video_page.wait_and_get_video_by_num(0).get_attribute('data-id'))\n self.assertEqual(video_id, 1691355875,\n 'First video need be https://ok.ru/video/1691355875 by \"Test\" request. 
Not {0}'.format(\n video_id))\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_tab_change(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n router.Router().open_top()\n self.assertEqual(self.driver.current_url, \"{0}video/{1}\".format(constants.BASE_URL, 'top'))\n\n router.Router().open_liveapp()\n self.assertEqual(self.driver.current_url, \"{0}video/{1}\".format(constants.BASE_URL, 'liveApp'))\n\n router.Router().open_new()\n self.assertEqual(self.driver.current_url, \"{0}video/{1}\".format(constants.BASE_URL, 'new'))\n\n router.Router().open_live()\n self.assertEqual(self.driver.current_url, \"{0}video/{1}\".format(constants.BASE_URL, 'live'))\n\n router.Router().open_suggest()\n self.assertEqual(self.driver.current_url, \"{0}video/{1}\".format(constants.BASE_URL, 'suggestedAlbums'))\n\n router.Router().open_catalog()\n self.assertEqual(self.driver.current_url, \"{0}video/{1}\".format(constants.BASE_URL, 'channels'))\n\n router.Router().open_my_videos()\n self.assertEqual(self.driver.current_url, \"{0}video/{1}\".format(constants.BASE_URL, 'myVideo'))\n\n router.Router().open_subscriptions()\n self.assertEqual(self.driver.current_url, \"{0}video/{1}\".format(constants.BASE_URL, 'subscriptions'))\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_video_watch_later(self):\n video_list_page = VideoListPage(self.driver)\n video_page = VideoPage(self.driver)\n\n router.Router().open_watchlater()\n\n test_vid_in_watchlater = self.TEST_VIDEO_ID in video_list_page.video_ids\n\n if test_vid_in_watchlater:\n video_page.open_by_id(self.TEST_VIDEO_ID)\n video_page.toggle_watch_later()\n\n router.Router().open_watchlater()\n\n self.assertNotIn(self.TEST_VIDEO_ID, video_list_page.video_ids,\n 'Didn`t remove video from watch later page on removing it from watch later')\n\n video_page.open_by_id(self.TEST_VIDEO_ID)\n\n video_page.toggle_watch_later()\n\n router.Router().open_watchlater()\n\n self.assertIn(self.TEST_VIDEO_ID, video_list_page.video_ids,\n 'Didn`t add video to watch later page on marking it watch later')\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_video_get_link(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n video_page = VideoPage(self.driver)\n video_page_list = VideoListPage(self.driver)\n\n router.Router().open_new()\n video = video_page_list.wait_and_get_video_by_num(0)\n\n video_id = video.get_attribute('data-id')\n video_page.open_by_id(video_id)\n video_page.get_video_player().click()\n video_url = video_page.get_video_link()\n\n self.driver.get(video_url)\n video_page = VideoPage(self.driver)\n\n self.assertEqual(video_id, video_page.get_video_id())\n\n @unittest.skipIf(constants.SKIP_FINISHED_TESTS, '')\n def test_can_add_and_remove_like(self):\n main_page = MainPage(self.driver)\n main_page.go_to_videos()\n\n video_page = VideoPage(self.driver)\n video_page_list = VideoListPage(self.driver)\n\n router.Router().open_new()\n video = video_page_list.wait_and_get_video_by_num(0)\n\n video_id = video.get_attribute('data-id')\n video_page.open_by_id(video_id)\n\n like_button = video_page.get_like_button()\n like_button.click()\n\n self.driver.refresh()\n\n video_page = VideoPage(self.driver)\n\n like_button_container = video_page.get_like_button_container()\n entry = '__active' in like_button_container.get_attribute('class')\n\n video_page.get_like_button().click()\n\n self.assertTrue(entry)\n\n def tearDown(self):\n main_page = MainPage(self.driver)\n 
main_page.hard_clear_authentification()\n self.driver.refresh()\n if constants.MAKE_SCREENSHOTS:\n self.driver.save_screenshot(\n SCREENSHOT_PATH + 'sessionreset/{time}.png'.format(time=datetime.now().time().isoformat()))\n\n @classmethod\n def tearDownClass(cls):\n if constants.MAKE_SCREENSHOTS:\n cls.driver.save_screenshot(\n SCREENSHOT_PATH + 'clear/{time}.png'.format(time=datetime.now().time().isoformat()))\n cls.driver.quit()\n", "sub_path": "tests/video.py", "file_name": "video.py", "file_ext": "py", "file_size_in_byte": 16604, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "constants.SCREENSHOT_PATH", "line_number": 18, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 30, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "name"}, {"api_name": "selenium.webdriver.Remote", "line_number": 32, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 32, "usage_type": "name"}, {"api_name": "constants.COMMAND_EXECUTOR", "line_number": 33, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities", "line_number": 34, "usage_type": "argument"}, {"api_name": "router.Router", "line_number": 37, "usage_type": "call"}, {"api_name": "pages.mainpage.MainPage", "line_number": 41, "usage_type": "call"}, {"api_name": "constants.MAKE_SCREENSHOTS", "line_number": 44, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "name"}, {"api_name": "pages.mainpage.MainPage", "line_number": 50, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 53, "usage_type": "call"}, {"api_name": "constants.BASE_URL", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pages.video_list.VideoListPage.PATH", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 57, "usage_type": "name"}, {"api_name": "constants.MAKE_SCREENSHOTS", "line_number": 59, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "name"}, {"api_name": "unittest.skipIf", "line_number": 48, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 65, "usage_type": "call"}, {"api_name": "pages.wall.WallPost", "line_number": 68, "usage_type": "call"}, {"api_name": "pages.wall.VideoSelector", "line_number": 71, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 63, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 79, "usage_type": "call"}, {"api_name": "pages.wall.WallPost", "line_number": 82, "usage_type": "call"}, {"api_name": "pages.wall.VideoSelector", "line_number": 87, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 77, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 77, "usage_type": "attribute"}, 
{"api_name": "pages.mainpage.MainPage", "line_number": 99, "usage_type": "call"}, {"api_name": "pages.wall.WallPost", "line_number": 102, "usage_type": "call"}, {"api_name": "pages.wall.VideoSelector", "line_number": 106, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 94, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 115, "usage_type": "call"}, {"api_name": "pages.wall.WallPost", "line_number": 118, "usage_type": "call"}, {"api_name": "pages.wall.VideoSelector", "line_number": 125, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 110, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 134, "usage_type": "call"}, {"api_name": "pages.wall.WallPost", "line_number": 137, "usage_type": "call"}, {"api_name": "pages.wall.VideoSelector", "line_number": 143, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 129, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 149, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 151, "usage_type": "call"}, {"api_name": "pages.wall.WallPost", "line_number": 156, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 160, "usage_type": "call"}, {"api_name": "unittest.skip", "line_number": 147, "usage_type": "call"}, {"api_name": "pages.mainpage.MainPage", "line_number": 168, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 171, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 166, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 166, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 184, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 187, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 189, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 182, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 214, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 217, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 219, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 212, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 212, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 236, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 234, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 234, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 253, "usage_type": "call"}, {"api_name": "pages.video.VideoPage", "line_number": 256, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 257, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 259, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 251, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 251, 
"usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 271, "usage_type": "call"}, {"api_name": "pages.video.VideoPage", "line_number": 274, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 275, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 277, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 269, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 269, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 284, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 287, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 288, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 295, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 307, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 282, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 282, "usage_type": "attribute"}, {"api_name": "pages.video.VideoPage", "line_number": 315, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 313, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 313, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 324, "usage_type": "call"}, {"api_name": "pages.video.VideoPage", "line_number": 327, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 328, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 330, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 322, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 322, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 337, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 340, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 335, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 335, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 350, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 353, "usage_type": "call"}, {"api_name": "constants.BASE_URL", "line_number": 354, "usage_type": "attribute"}, {"api_name": "router.Router", "line_number": 356, "usage_type": "call"}, {"api_name": "constants.BASE_URL", "line_number": 357, "usage_type": "attribute"}, {"api_name": "router.Router", "line_number": 359, "usage_type": "call"}, {"api_name": "constants.BASE_URL", "line_number": 360, "usage_type": "attribute"}, {"api_name": "router.Router", "line_number": 362, "usage_type": "call"}, {"api_name": "constants.BASE_URL", "line_number": 363, "usage_type": "attribute"}, {"api_name": "router.Router", "line_number": 365, "usage_type": "call"}, {"api_name": "constants.BASE_URL", "line_number": 366, "usage_type": "attribute"}, {"api_name": "router.Router", "line_number": 368, "usage_type": "call"}, {"api_name": "constants.BASE_URL", "line_number": 369, "usage_type": "attribute"}, {"api_name": "router.Router", "line_number": 371, "usage_type": "call"}, {"api_name": "constants.BASE_URL", "line_number": 372, "usage_type": "attribute"}, {"api_name": "router.Router", "line_number": 374, "usage_type": "call"}, {"api_name": "constants.BASE_URL", "line_number": 375, "usage_type": "attribute"}, {"api_name": "unittest.skipIf", "line_number": 
348, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 348, "usage_type": "attribute"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 379, "usage_type": "call"}, {"api_name": "pages.video.VideoPage", "line_number": 380, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 382, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 390, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 399, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 377, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 377, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 406, "usage_type": "call"}, {"api_name": "pages.video.VideoPage", "line_number": 409, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 410, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 412, "usage_type": "call"}, {"api_name": "pages.video.VideoPage", "line_number": 421, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 404, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 404, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 427, "usage_type": "call"}, {"api_name": "pages.video.VideoPage", "line_number": 430, "usage_type": "call"}, {"api_name": "pages.video_list.VideoListPage", "line_number": 431, "usage_type": "call"}, {"api_name": "router.Router", "line_number": 433, "usage_type": "call"}, {"api_name": "pages.video.VideoPage", "line_number": 444, "usage_type": "call"}, {"api_name": "unittest.skipIf", "line_number": 425, "usage_type": "call"}, {"api_name": "constants.SKIP_FINISHED_TESTS", "line_number": 425, "usage_type": "attribute"}, {"api_name": "pages.mainpage.MainPage", "line_number": 454, "usage_type": "call"}, {"api_name": "constants.MAKE_SCREENSHOTS", "line_number": 457, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 459, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 459, "usage_type": "name"}, {"api_name": "constants.MAKE_SCREENSHOTS", "line_number": 463, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 465, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 465, "usage_type": "name"}]} +{"seq_id": "394362055", "text": "# coding=utf-8\n\"\"\"\nNope\n\"\"\"\nfrom src.miz.mission import Group\nfrom utils.custom_logging import make_logger, Logged\n\nLOGGER = make_logger('flights')\n\n\nclass Flights(Logged):\n\n def __init__(self):\n super().__init__()\n self.d = {}\n\n def add_flight(self, flight):\n if not isinstance(flight, Flight):\n raise Exception('TODO')\n if flight.id in self.d.keys():\n raise Exception('TODO')\n self.d[flight.id] = flight\n\n def remove_flight(self, flight):\n if not isinstance(flight, Flight):\n raise Exception('TODO')\n if flight.id not in self.d.keys():\n raise Exception('TODO')\n del self.d[flight.id]\n\n def populate_from_miz(self, miz):\n print(miz.mission)\n for group in miz.mission.groups:\n if group.group_is_client_group:\n flight = Flight(group)\n self.add_flight(flight)\n\n @property\n def flights(self):\n for _, flight in self.d.items():\n yield flight\n\n\nclass Flight(Logged):\n\n def __init__(self, group):\n super().__init__()\n assert isinstance(group, Group)\n self.group = group\n if not self.group.group_is_client_group:\n raise Exception('Not a \"Client\" 
group')\n\n @property\n def id(self):\n return self.group.group_id\n\n @property\n def name(self):\n return self.group.group_name\n", "sub_path": "src/esme/flights.py", "file_name": "flights.py", "file_ext": "py", "file_size_in_byte": 1420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "utils.custom_logging.make_logger", "line_number": 8, "usage_type": "call"}, {"api_name": "utils.custom_logging.Logged", "line_number": 11, "usage_type": "name"}, {"api_name": "utils.custom_logging.Logged", "line_number": 44, "usage_type": "name"}, {"api_name": "src.miz.mission.Group", "line_number": 48, "usage_type": "argument"}]} +{"seq_id": "467976212", "text": "from django.conf.urls import url, include\nfrom rest_framework.routers import DefaultRouter\nfrom notifications import views\n\n\nrouter = DefaultRouter()\n\nrouter.register(r'user/(?P\\d+)', views.NotificationViewSet, 'user_notifications')\n\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n url(r'^(?P\\d+)/view/$', views.NotificationViewed.as_view()),\n url(r'^user/(?P\\d+)/view_all/$', views.NotificationViewAll.as_view()),\n]\n", "sub_path": "bounties_api/notifications/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 465, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 6, "usage_type": "call"}, {"api_name": "notifications.views.NotificationViewSet", "line_number": 8, "usage_type": "attribute"}, {"api_name": "notifications.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "notifications.views.NotificationViewed.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "notifications.views.NotificationViewed", "line_number": 13, "usage_type": "attribute"}, {"api_name": "notifications.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "notifications.views.NotificationViewAll.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "notifications.views.NotificationViewAll", "line_number": 14, "usage_type": "attribute"}, {"api_name": "notifications.views", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "502200734", "text": "#!/usr/bin/python3\nimport requests\nfrom lxml import html\n\npage = requests.get('https://intranet.hbtn.io/projects/261')\ntree = html.fromstring(page.content)\n\nmainheader = tree.xpath('//div/text()')\n\nprint(mainheader)\nprint(tree)\n", "sub_path": "holberton.py", "file_name": "holberton.py", "file_ext": "py", "file_size_in_byte": 228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 6, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "627479659", "text": "\"\"\"\nCreated on: 29/01/2018\nAuthor: Nikolaos Apostolakos\n\"\"\"\n\nfrom __future__ import division, print_function\n\nimport os\nimport numpy as np\nimport astropy.io.fits as fits\nfrom astropy.table import Table\n\nfrom nnpz.exceptions import 
*\n\n\nclass PhotometryProvider(object):\n \"\"\"This is a utility class for handling NNPZ photometry FITS files\"\"\"\n\n\n def __checkFileFormat(self, filename):\n \"\"\"Checks that the file exists and that it has the correct HDUs\"\"\"\n\n if not os.path.exists(filename):\n raise FileNotFoundException('File {} does not exist'.format(filename))\n\n try:\n hdus = fits.open(filename)\n except:\n raise WrongFormatException('Failed to open {} as a FITS file'.format(filename))\n\n # Check that the first table is the NNPZ photometry\n if not 'NNPZ_PHOTOMETRY' in hdus:\n raise WrongFormatException('File {} does not contain NNPZ photometry'.format(filename))\n return hdus\n\n\n def __readFilterTransmissions(self, hdus, filter_list):\n \"\"\"Reads the filter transmissions from the hdus\"\"\"\n\n filter_data = {}\n for name in filter_list:\n if not name in hdus:\n filter_data[name] = None\n else:\n t = Table(hdus[name].data)\n trans = np.ndarray((len(t),2), dtype=np.float32)\n trans[:,0] = t.columns[0]\n trans[:,1] = t.columns[1]\n filter_data[name] = trans\n return filter_data\n\n\n def __readPhotometryData(self, phot_table, filter_list):\n \"\"\"Reads from the given table the photometry values for the given filters\n in a numpy array.\"\"\"\n\n data = np.zeros((len(phot_table), len(filter_list), 2), dtype=np.float32)\n for i, name in enumerate(filter_list):\n data[:,i,0] = phot_table[name]\n if name+'_ERR' in phot_table.colnames:\n data[:,i,1] = phot_table[name+'_ERR']\n return data\n\n\n def __init__(self, filename):\n \"\"\"Creates a new instance for accessing the given photometry file.\n\n Args:\n filename: The photometry file to read\n\n Raises:\n FileNotFoundException: If the file does not exist\n WrongFormatException: If the file is not a NNPZ photometry file\n \"\"\"\n\n # Check the format of the file\n hdus = self.__checkFileFormat(filename)\n\n # Get the type of photometry in the file\n self.__type = hdus[1].header.get('PHOTYPE')\n\n # Create a list with the filters in the file\n phot_table = Table(hdus['NNPZ_PHOTOMETRY'].data)\n self.__filter_list = [c for c in phot_table.colnames if c != 'ID' and not c.endswith('_ERR')]\n\n # Read the filter transmissions from the extension HDUs\n self.__filter_data = self.__readFilterTransmissions(hdus, self.__filter_list)\n\n # Get the IDs\n self.__ids = phot_table['ID']\n\n # Read the photometry values\n self.__phot_data = self.__readPhotometryData(phot_table, self.__filter_list)\n\n\n def getType(self):\n \"\"\"Returns the type of photometry in the file.\n\n The possible types are:\n - Photons: The photometry values are photon count rates, expressed in counts/s/cm^2\n - F_nu: The photometry values are energy flux densities expressed in erg/s/cm^2/Hz\n - F_nu_uJy: The photometry values are energy flux densities expressed in uJy\n - F_lambda: The photometry values are energy flux densities expressed in erg/s/cm^2/A\n - MAG_AB: The photometry values are AB magnitudes\n \"\"\"\n return self.__type\n\n\n def getFilterList(self):\n \"\"\"Returns a list with the available filter names\"\"\"\n return self.__filter_list\n\n\n def getFilterTransmission(self, filter):\n \"\"\"Returns the transmission of the given filter.\n\n Args:\n filter: The name of the filter to get the transmission for\n\n Returns: A 2D numpy array of single precision floats, where the first\n dimension has the same size as the number of knots and the second has\n always size two, where the first element is the wavelength value of\n the knot (expressed in Angstrom) and the second is the 
filter\n transmission, in the range [0,1].\n\n Raises:\n UnknownNameException: If the file does not contain photometry for\n the given filter\n MissingDataException: If the HDU for the given filter transmission\n is missing\n \"\"\"\n if not filter in self.__filter_data:\n raise UnknownNameException('Unknown filter {}'.format(filter))\n if self.__filter_data[filter] is None:\n raise MissingDataException('File does not contain transmission for {} filter'.format(filter))\n return self.__filter_data[filter]\n\n\n def getIds(self):\n \"\"\"Returns the IDs of the objects for which there is photometry in the file\"\"\"\n return self.__ids\n\n\n def getData(self, *filter_list):\n \"\"\"Returns an array with the photometry data for the given bands.\n\n Args:\n filter_list: The filter names to get the data for. If no filter is\n given, the result is returned for the full filter list, in the same\n order as returned by the getFilterList() method.\n\n Returns:\n A three dimensional numpy array of single precision floats where the\n first dimension has the same size as the number of objects the file\n contains photometries for, the second axis has the same size as the\n given bands and the third axis has always size two, where the first\n element represents the photometry value and the second the uncertainty.\n\n Raises:\n UnknownNameException: If the file does not contain photometry for\n any of the given filters\n\n If the file does not contain uncertainty columns for some filters, the\n returned array will contain zero values for these uncertainties. The\n order of the second axis of the result is the same as the passed filter\n names.\n \"\"\"\n\n if len(filter_list) == 0:\n filter_list = self.getFilterList()\n\n # Create the array to store the results\n result = np.zeros((len(self.__ids), len(filter_list), 2), dtype=np.float32)\n\n for user_i, name in enumerate(filter_list):\n # Find the index of the filter\n try:\n local_i = self.__filter_list.index(name)\n except ValueError:\n raise UnknownNameException('File does not contain photometry for {}'.format(name))\n\n # Populate the result\n result[:,user_i,:] = self.__phot_data[:,local_i,:]\n\n return result", "sub_path": "nnpz/photometry/PhotometryProvider.py", "file_name": "PhotometryProvider.py", "file_ext": "py", "file_size_in_byte": 6783, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.open", "line_number": 27, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 27, "usage_type": "name"}, {"api_name": "astropy.table.Table", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 57, "usage_type": "attribute"}, {"api_name": "astropy.table.Table", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 173, "usage_type": "attribute"}]} +{"seq_id": "144725061", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\n# Load the data; usecols makes sure the first column (the galaxy name) is not read, since it is a string that causes an error. 
\ndata = np.loadtxt('RotationCurve_F571-8.txt', usecols = (1,2,3,4,5,6,7))\n\n# Store the columns in arrays \nPrad = data[:,1]\nvGas = data[:,2]\nvDisk = data[:,3]\nvBul = data[:,4]\nVel = data[:,5]\n\n# Make the plots: a scatter shows the points and a plot joins them to ease visualization\nplt.scatter(Prad, Vel, c = 'black')\nplt.scatter(Prad, vGas + vDisk + vBul, c = 'green')\nplt.plot(Prad, Vel, c = 'black', label = 'Velocidad media')\nplt.plot(Prad, vGas + vDisk + vBul, c = 'green', label = 'Suma de velocidades esperadas')\n\n# Label the axes and place the legend in the fourth quadrant of the plot so it does not obstruct the points\nplt.xlabel('Radio fisico (kpc)')\nplt.ylabel('Velocidad (km/s)')\nplt.legend(loc = 4)\n\n# Save and close the figure\nplt.savefig('RotationCurvePlot.pdf')\nplt.close()\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "Teaching/20171/MetodosCompu20171/Semana4/Tarea1/hw1_sol/q1/PLOTS_RotationCurves.py", "file_name": "PLOTS_RotationCurves.py", "file_ext": "py", "file_size_in_byte": 1030, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.loadtxt", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "565478526", "text": "import click\nfrom hsds_tools import Boto_Load\n\n\n@click.command()\n@click.argument('h5_file', type=click.Path(exists=True))\n@click.argument('config_file', type=click.Path(exists=True))\n@click.argument('dset_name', type=str)\n@click.option('--temp_dir', '-temp', default=None,\n type=click.Path(exists=True),\n help='Directory to store temporary files during processing')\n@click.option('--log_file', '-log', default=None, type=click.Path(),\n help='Path to file to use for logging')\ndef load_nsrdb(h5_file, config_file, dset_name, temp_dir, log_file):\n with Boto_Load(h5_file, config_file, log_file=log_file) as hsds:\n hsds.load_dset(dset_name, temp_dir=temp_dir)\n\n\nif __name__ == '__main__':\n load_nsrdb()\n", "sub_path": "load_nsrdb/peregrine_dir_load_nsrdb.py", "file_name": "peregrine_dir_load_nsrdb.py", "file_ext": "py", 
"file_size_in_byte": 752, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "hsds_tools.Boto_Load", "line_number": 15, "usage_type": "call"}, {"api_name": "click.command", "line_number": 5, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 6, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 6, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 7, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 7, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 8, "usage_type": "call"}, {"api_name": "click.option", "line_number": 9, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 10, "usage_type": "call"}, {"api_name": "click.option", "line_number": 12, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "193854605", "text": "from werkzeug.utils import secure_filename\nfrom flask import Flask,render_template,request,url_for,redirect,Response,flash\nfrom flask_sqlalchemy import SQLAlchemy\nimport os\n\n\napp = Flask(__name__)\napp.secret_key = \"dont tell anyone\"\n\nproject_dir = os.path.dirname(os.path.abspath(__file__))\ndatabase_file = \"sqlite:///{}\".format(os.path.join(project_dir, \"img.db\"))\n\napp = Flask(__name__)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = database_file\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"]=False\ndb = SQLAlchemy(app)\n\n\n\nclass data(db.Model):\n id=db.Column(db.INTEGER, primary_key=True)\n email=db.Column(db.String(100))\n first_name=db.Column(db.String(100))\n last_name=db.Column(db.String(100))\n phone_number=db.Column(db.String(100))\n contact_address=db.Column(db.Text)\n office_hours=db.Column(db.Text)\n assigned_time=db.Column(db.Text)\n received_assigned_time=db.Column(db.Text)\n img = db.Column(db.LargeBinary, nullable=False)\n name = db.Column(db.Text, nullable=False)\n mimetype = db.Column(db.Text, nullable=False)\n\n\n@app.route('/',methods=['GET'])\ndef index():\n # flash(\"hello\")\n db.create_all()\n return render_template('question.html')\n\n\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n email=request.form['email']\n first_name=request.form['first_name']\n last_name=request.form['last_name']\n phone_number=request.form['phone_number']\n contact_address=request.form['contact_address']\n office_hours=request.form['office_hours']\n assigned_time=request.form['assigned_time']\n received_assigned_time=request.form['received_assigned_time']\n pic = request.files['pic']\n if not pic:\n return 'No File selected!', 400\n\n filename = secure_filename(pic.filename)\n mimetype = pic.mimetype\n if not filename or not mimetype:\n return 'Bad upload!', 400\n\n img = data(email=email,first_name=first_name,last_name=last_name,phone_number=phone_number,contact_address=contact_address,office_hours=office_hours,assigned_time=assigned_time,received_assigned_time=received_assigned_time,img=pic.read(), name=filename, mimetype=mimetype)\n db.session.add(img)\n db.session.commit()\n # flash(\"Data inserted Successfully\")\n return redirect(url_for('index'))\n\n\n@app.route(\"/response\")\ndef response():\n all_data=data.query.all()\n\n return render_template('response.html',Data=all_data)\n\n\n\n\n@app.route('/view/')\ndef get_data(id):\n img = data.query.filter_by(id=id).first()\n if not img:\n return 'File Not Found!', 404\n\n return Response(img.img,mimetype=img.mimetype)\n\n@app.route('/delete//',methods=['GET','POST'])\ndef 
delete(id):\n my_data=data.query.get(id)\n db.session.delete(my_data)\n db.session.commit()\n flash('hello')\n return redirect(url_for('response'))\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "sub_path": "venv/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2858, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "614134999", "text": "import os\nimport simplejson\n\nimport pysolr\nfrom flask import Flask, render_template, request\n\n\napp = Flask(__name__)\nDEBUG = os.environ.get('DEBUG', False)\nPORT = os.environ.get('PHOTON_PORT', 5001)\nHOST = os.environ.get('PHOTON_HOST', '127.0.0.1')\nsolr = pysolr.Solr(os.environ.get(\"SOLR_ENDPOINT\", 'http://localhost:8983/solr/'), timeout=10)\n\n@app.route('/')\ndef index():\n return 
render_template('index.html')\n\n\n@app.route('/search/')\ndef search():\n bbox = request.args['center'].split(',')\n params = {\n \"hl\": 'true',\n \"rows\": 10,\n \"qt\": \"english_loc\",\n \"pt\": \"{0},{1}\".format(*bbox),\n \"qf\": \"name^4.0 city^2.0\",\n \"fq\": \"{{!geofilt pt={0},{1} sfield=coordinate d=40}}\".format(*bbox),\n }\n from pprint import pprint\n pprint(params)\n q = request.args.get('q', '*') # \"(name:{0} OR city:{0}) AND -osm_key:boundary\".format(request.args.get('q', '*'))\n results = solr.search(q, **params)\n return simplejson.dumps({\n \"docs\": results.docs,\n \"highlight\": results.highlighting\n })\n\nif __name__ == \"__main__\":\n app.run(debug=DEBUG, port=int(PORT), host=HOST)\n", "sub_path": "src/main/python/demo/photon/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1139, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pysolr.Solr", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "385788151", "text": "#!/usr/bin/env python\n'''Quick and dirty hack to manage redis server in Mac private deployments.\ni.e. start/stop/restart ... 
like /etc/init.d/redis\n'''\n\n__author__ = 'Andreas Pfeiffer'\n__copyright__ = 'Copyright 2013, CERN CMS'\n__credits__ = ['Miguel Ojeda', 'Andreas Pfeiffer']\n__license__ = 'Unknown'\n__maintainer__ = 'Andreas Pfeiffer'\n__email__ = 'andreas.pfeiffer@cern.ch'\n\n\nimport os\nimport sys\nimport subprocess\nimport logging\nimport optparse\n\n\ndef getOptions():\n\n    parser = optparse.OptionParser(usage =\n        'Usage: %prog command [options]\\n'\n        '\\n'\n        'Examples:\\n'\n        '  %prog start\\n'\n        '  %prog stop\\n'\n        '  %prog restart'\n    )\n\n    (options, args) = parser.parse_args()\n\n    if len(args) != 1:\n        parser.print_help()\n        sys.exit(2)\n\n    return args[0], vars(options)\n\n\ndef check_output(*popenargs, **kwargs):\n    '''Mimics subprocess.check_output() in Python 2.6\n    '''\n\n    process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, **kwargs)\n    stdout, _ = process.communicate()\n    returnCode = process.returncode\n    cmd = kwargs.get(\"args\")\n    if cmd is None:\n        cmd = popenargs[0]\n    if returnCode:\n        raise subprocess.CalledProcessError(returnCode, cmd)\n    return stdout\n\n\ndef execute(command):\n    '''Executes a command in a shell:\n        - Allowing input\n        - Without redirecting stderr\n        - Raises an exception if it returns with a non-zero exit code\n        - Returns the stdout\n    '''\n\n    # Don't redirect stderr: That way we can see the error\n    # if the command does not finish correctly\n    logging.info('Executing: ' + command)\n    return check_output(command, shell=True)\n\n\ndef isRunning():\n    try:\n        execute('pgrep redis-server >/dev/null')\n        return True\n    except subprocess.CalledProcessError:\n        return False\n\n\ndef start():\n    if isRunning():\n        logging.warning('Redis is already running.')\n        return 1\n\n    subprocess.Popen('nohup redis-server >/data/logs/redis.log 2>&1 &', shell = True)\n\n\ndef stop():\n    if not isRunning():\n        logging.warning('Redis is not running.')\n        return 1\n\n    execute('redis-cli shutdown')\n\n\ndef restart():\n    stop()\n    return start()\n\n\ndef main():\n    '''Entry point.\n    '''\n\n    command, options = getOptions()\n\n    if 'start' == command:\n        return start()\n    elif 'stop' == command:\n        return stop()\n    elif 'restart' == command:\n        return restart()\n    else:\n        logging.error('Wrong command.')\n        return 2\n\n\nif __name__ == '__main__':\n    logging.basicConfig(\n        format = '[%(asctime)s] %(levelname)s: %(message)s',\n        level = logging.INFO,\n    )\n\n    sys.exit(main())\n\n", "sub_path": "keeper/manageRedis.py", "file_name": "manageRedis.py", "file_ext": "py", "file_size_in_byte": 2669, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "optparse.OptionParser", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 36, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 45, "usage_type": "call"}, {"api_name": "subprocess.CalledProcessError", "line_number": 52, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 66, "usage_type": "call"}, {"api_name": "subprocess.CalledProcessError", "line_number": 74, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 80, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 83, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 88, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 112, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 117, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 119, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 122, 
"usage_type": "call"}]} +{"seq_id": "360294843", "text": "# -*- coding: utf-8 -*-\n\nfrom base64 import b64encode\nfrom config import DEBUG, SQLALCHEMY_DATABASE_URI, SQLALCHEMY_COMMIT_ON_TEARDOWN, REDIS_PORT, REDIS_DB\nfrom gevent.wsgi import WSGIServer\nfrom uuid import uuid4\nfrom flask import redirect, url_for\nfrom flask import Flask\nfrom util.hack import nullpool_SQLAlchemy\nimport redis\n\n\nclass App(object):\n def __init__(self):\n self.app = Flask(__name__)\n self.app.secret_key = b64encode(uuid4().hex)\n self.app.debug = DEBUG\n self.app.config['MAX_CONTENT_LENGTH'] = 32 * 1024 * 1024\n self.app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI\n self.app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = SQLALCHEMY_COMMIT_ON_TEARDOWN\n\n def register_api_blueprint(self):\n from api import api\n self.app.register_blueprint(api, url_prefix='/api')\n\n def register_admin_blueprint(self):\n from admin import admin\n self.app.register_blueprint(admin, url_prefix='/admin')\n\n def run(self):\n self.register_api_blueprint()\n self.register_admin_blueprint()\n if DEBUG:\n from config import DEBUG_IP, DEBUG_PORT\n self.app.debug = True\n self.app.run(host=DEBUG_IP, port=DEBUG_PORT)\n else:\n from config import NONDEBUG_IP, NONDEBUG_PORT\n self.app.debug = False\n self.app.run(host=NONDEBUG_IP, port=NONDEBUG_PORT)\n\n\napp = App()\ndb = nullpool_SQLAlchemy(app.app)\nredisClient = redis.Redis(host='127.0.0.1', port=REDIS_PORT, db=REDIS_DB)\n\n\n@app.app.route('/', methods=[\"GET\"])\ndef index():\n return redirect('/admin/')\n\nif __name__ == '__main__':\n app.run()\n", "sub_path": "backend/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1665, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 16, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 16, "usage_type": "call"}, {"api_name": "config.DEBUG", "line_number": 17, "usage_type": "name"}, {"api_name": "config.SQLALCHEMY_DATABASE_URI", "line_number": 19, "usage_type": "name"}, {"api_name": "config.SQLALCHEMY_COMMIT_ON_TEARDOWN", "line_number": 20, "usage_type": "name"}, {"api_name": "api.api", "line_number": 24, "usage_type": "name"}, {"api_name": "admin.admin", "line_number": 28, "usage_type": "name"}, {"api_name": "config.DEBUG", "line_number": 33, "usage_type": "name"}, {"api_name": "config.DEBUG_IP", "line_number": 36, "usage_type": "name"}, {"api_name": "config.DEBUG_PORT", "line_number": 36, "usage_type": "name"}, {"api_name": "config.NONDEBUG_IP", "line_number": 40, "usage_type": "name"}, {"api_name": "config.NONDEBUG_PORT", "line_number": 40, "usage_type": "name"}, {"api_name": "{'api': 'api.api', 'admin': 'admin.admin', 'DEBUG_IP': 'config.DEBUG_IP', 'DEBUG_PORT': 'config.DEBUG_PORT', 'NONDEBUG_IP': 'config.NONDEBUG_IP', 'NONDEBUG_PORT': 'config.NONDEBUG_PORT'}", "line_number": 43, "usage_type": "call"}, {"api_name": "util.hack.nullpool_SQLAlchemy", "line_number": 44, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 45, "usage_type": "call"}, {"api_name": "config.REDIS_PORT", "line_number": 45, "usage_type": "name"}, {"api_name": "config.REDIS_DB", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "417162785", "text": "import os\nimport configparser\n# import getpathInfo\nfrom utils.getpathInfo import get_Path\n# 
import getpathInfo# import our own helper class that looks up the project path\n \n# path = getpathInfo().get_Path()# instantiate and call it; recall that this class returns the path E:\VisualStudio\UnitestTestHttp\utils\npath = get_Path()# call the helper; recall that it returns the path E:\VisualStudio\UnitestTestHttp\utils\nconfig_path = os.path.join(path, 'config.ini')# append one more level under path, which finally becomes E:\VisualStudio\UnitestTestHttp\utils\config.ini\nconfig = configparser.ConfigParser()# use the external config-file reading method\nconfig.read(config_path, encoding='utf-8')\n \nclass ReadConfig():\n \n    def get_http(self, name):\n        value = config.get('HTTP', name)\n        return value\n    def get_email(self, name):\n        value = config.get('EMAIL', name)\n        return value\n    def get_mysql(self, name):# written for later use; we have no database operations yet, so this can be commented out\n        value = config.get('DATABASE', name)\n        return value\n \n \nif __name__ == '__main__':# quick test that our config-reading methods work\n    print('baseurl value in the HTTP section:', ReadConfig().get_http('baseurl'))\n    print('on_off switch value in the EMAIL section:', ReadConfig().get_email('on_off'))\n    print('user value in the mysql section:', ReadConfig().get_mysql('user'))\n    print(config_path)", "sub_path": "UnitestTestHttp/utils/readConfig.py", "file_name": "readConfig.py", "file_ext": "py", "file_size_in_byte": 1419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "utils.getpathInfo.get_Path", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "581436387", "text": "\"\"\"Device manager.\"\"\"\nimport asyncio\nimport logging\n\nfrom ..address import Address\nfrom ..device_types.device_base import Device\nfrom ..device_types.modem_base import ModemBase\nfrom ..device_types.x10_base import X10DeviceBase\nfrom ..managers.saved_devices_manager import SavedDeviceManager\nfrom ..subscriber_base import SubscriberBase\nfrom ..x10_address import X10Address\nfrom .device_id_manager import DeviceId, DeviceIdManager\nfrom .device_link_manager import DeviceLinkManager\nfrom .utils import create_device, create_x10_device\n\nDEVICE_INFO_FILE = \"insteon_devices.json\"\n_LOGGER = logging.getLogger(__name__)\n\n\n# TODO remove devices\nclass DeviceManager(SubscriberBase):\n    \"\"\"Manages the list of active devices.\"\"\"\n\n    def __init__(self):\n        \"\"\"Init the DeviceManager class.\"\"\"\n        super().__init__(subscriber_topic=\"device_added\")\n        self._devices = {}\n        self._modem = None\n        self._id_manager = DeviceIdManager()\n        self._id_manager.subscribe(self._device_identified)\n        self._loading_saved_lock = asyncio.Lock()\n        self._link_manager = DeviceLinkManager(self)\n\n    def __getitem__(self, address) -> Device:\n        \"\"\"Return a device from the device address.\"\"\"\n        try:\n            address = Address(address)\n        except ValueError:\n            address = X10Address(address)\n        return self._devices.get(address)\n\n    def __iter__(self):\n        \"\"\"Return an iterator of device addresses.\"\"\"\n        for address in self._devices:\n            yield address\n\n    def __setitem__(self, address, device):\n        \"\"\"Add a device to the device list.\"\"\"\n        _LOGGER.info(\"Adding device to INSTEON devices list: %s\", address.id)\n        if not isinstance(device, (Device, DeviceId, X10DeviceBase)):\n            raise ValueError(\"Device must be a DeviceId or a Device type.\")\n\n        if isinstance(device, DeviceId):\n            device = create_device(device)\n\n        self._devices[device.address] = device\n        if isinstance(device, Device):\n            self._id_manager.set_device_id(\n                device.address, device.cat, device.subcat, 
device.firmware\n            )\n        self._call_subscribers(address=device.address.id)\n\n    def __len__(self):\n        \"\"\"Return the number of devices.\"\"\"\n        return len(self._devices)\n\n    def get(self, address) -> Device:\n        \"\"\"Return a device from an address.\"\"\"\n        try:\n            address = Address(address)\n        except ValueError:\n            address = X10Address(address)\n        return self._devices.get(address)\n\n    def pop(self, address):\n        \"\"\"Remove a device from the device list.\"\"\"\n        try:\n            address = Address(address)\n        except ValueError:\n            address = X10Address(address)\n        self._devices.pop(address)\n\n    @property\n    def modem(self):\n        \"\"\"Return the Insteon Modem.\"\"\"\n        return self._modem\n\n    @modem.setter\n    def modem(self, modem):\n        \"\"\"Set the Insteon Modem.\"\"\"\n        if not isinstance(modem, ModemBase):\n            raise ValueError(\"Must be an Insteon Modem object\")\n        self._modem = modem\n        self._devices[self._modem.address] = self._modem\n\n    @property\n    def id_manager(self):\n        \"\"\"Return the ID manager instance.\"\"\"\n        return self._id_manager\n\n    def set_id(self, address: Address, cat: int, subcat: int, firmware: int):\n        \"\"\"Add a device override to identify the device information.\n\n        Typical use is to identify devices that do not respond to an ID Request\n        such as a battery operated device.\n\n        \"\"\"\n        address = Address(address)\n        device = self[address]\n        if device and device.cat == cat and device.subcat == subcat:\n            return\n        self._id_manager.set_device_id(address, cat, subcat, firmware)\n\n    async def async_identify_device(self, address: Address):\n        \"\"\"Identify a device.\n\n        The device will be placed into the unknown device list to be identified.\n\n        If the device has already been identified, this method will remove the device\n        from the known device list. This is typically used when a `set_id` command has\n        been run to create a device override. 
The `async_reidentify_device` command will\n        reset that override and allow normal device identification to run.\n        \"\"\"\n        self._devices.pop(Address(address), None)\n        await self._id_manager.async_id_device(address=address, refresh=True)\n\n    def add_x10_device(\n        self,\n        housecode: str,\n        unitcode: int,\n        x10_feature: str,\n        steps: int = 22,\n        max_level: int = 255,\n    ):\n        \"\"\"Add an X10 device.\"\"\"\n        device = create_x10_device(housecode, unitcode, x10_feature, steps, max_level)\n        if device:\n            self[device.address] = device\n        return device\n\n    async def async_close(self):\n        \"\"\"Close the device ID listener.\"\"\"\n        self._id_manager.close()\n\n    async def async_load(self, workdir=\"\", id_devices=1, load_modem_aldb=1):\n        \"\"\"Load devices from the `insteon_devices.json` file and device overrides.\n\n        Parameters:\n            workdir: Directory name to find the `insteon_devices.json` file\n\n            id_devices: Indicate if devices should be identified using ID Request\n                0: No devices are identified\n                1: Unknown devices are identified\n                2: All devices are identified\n                (default=1)\n\n            load_modem_aldb: Indicate if the Modem ALDB should be loaded\n                0: Do not load\n                1: Load if not loaded from save file\n                2: Load\n                (default=1)\n\n        The Modem ALDB is loaded if `refresh` is True or if the saved file has no devices.\n\n        \"\"\"\n        if workdir:\n            async with self._loading_saved_lock:\n                saved_devices_manager = SavedDeviceManager(workdir, self.modem)\n                devices = await saved_devices_manager.async_load()\n                for address in devices:\n                    self[address] = devices[address]\n\n        if load_modem_aldb == 0:\n            load_modem_aldb = False\n        elif load_modem_aldb == 2:\n            load_modem_aldb = True\n        else:\n            load_modem_aldb = not self._modem.aldb.is_loaded\n\n        if load_modem_aldb:\n            await self._modem.aldb.async_load()\n\n        for mem_addr in self._modem.aldb:\n            rec = self._modem.aldb[mem_addr]\n            if rec.target != Address(\"000000\"):\n                self._id_manager.append(rec.target)\n\n        if id_devices:\n            id_all = id_devices == 2\n            await self._id_manager.async_id_devices(refresh=id_all)\n\n    async def async_save(self, workdir):\n        \"\"\"Save devices to a device information file.\"\"\"\n        saved_devices_manager = SavedDeviceManager(workdir, self.modem)\n        await saved_devices_manager.async_save(self._devices)\n\n    def _device_identified(self, device_id: DeviceId):\n        \"\"\"Device identified by device ID manager.\"\"\"\n        if self._loading_saved_lock.locked():\n            return\n        if device_id.cat is not None:\n            device = create_device(device_id)\n            if self[device_id.address]:\n                # If the device is already in the devices list and the cat and subcat\n                # are the same, do not add the device again\n                if (\n                    device_id.cat == self[device_id.address].cat\n                    and device_id.subcat == self._devices[device_id.address].subcat\n                ):\n                    return\n            self[device_id.address] = device\n            if device_id.cat != 0x03:\n                asyncio.ensure_future(device.async_get_engine_version())\n            asyncio.ensure_future(self.async_setup_device(device))\n            _LOGGER.debug(\"Device %s added\", device.address)\n\n    async def async_setup_device(self, device):\n        \"\"\"Set up device.\"\"\"\n        await device.aldb.async_load(refresh=True)\n        await device.async_read_op_flags()\n        await device.async_read_ext_properties()\n        await device.async_add_default_links()\n", "sub_path": "pyinsteon/managers/device_manager.py", "file_name": "device_manager.py", "file_ext": "py", "file_size_in_byte": 8117, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": 
"call"}, {"api_name": "subscriber_base.SubscriberBase", "line_number": 21, "usage_type": "name"}, {"api_name": "device_id_manager.DeviceIdManager", "line_number": 29, "usage_type": "call"}, {"api_name": "asyncio.Lock", "line_number": 31, "usage_type": "call"}, {"api_name": "device_link_manager.DeviceLinkManager", "line_number": 32, "usage_type": "call"}, {"api_name": "address.Address", "line_number": 37, "usage_type": "call"}, {"api_name": "x10_address.X10Address", "line_number": 39, "usage_type": "call"}, {"api_name": "device_types.device_base.Device", "line_number": 34, "usage_type": "name"}, {"api_name": "address.id", "line_number": 49, "usage_type": "attribute"}, {"api_name": "device_types.device_base.Device", "line_number": 50, "usage_type": "name"}, {"api_name": "device_id_manager.DeviceId", "line_number": 50, "usage_type": "name"}, {"api_name": "device_types.x10_base.X10DeviceBase", "line_number": 50, "usage_type": "name"}, {"api_name": "device_id_manager.DeviceId", "line_number": 53, "usage_type": "argument"}, {"api_name": "utils.create_device", "line_number": 54, "usage_type": "call"}, {"api_name": "device_types.device_base.Device", "line_number": 57, "usage_type": "argument"}, {"api_name": "address.Address", "line_number": 70, "usage_type": "call"}, {"api_name": "x10_address.X10Address", "line_number": 72, "usage_type": "call"}, {"api_name": "device_types.device_base.Device", "line_number": 67, "usage_type": "name"}, {"api_name": "address.Address", "line_number": 78, "usage_type": "call"}, {"api_name": "x10_address.X10Address", "line_number": 80, "usage_type": "call"}, {"api_name": "device_types.modem_base.ModemBase", "line_number": 91, "usage_type": "argument"}, {"api_name": "address.Address", "line_number": 101, "usage_type": "name"}, {"api_name": "address.Address", "line_number": 108, "usage_type": "call"}, {"api_name": "address.Address", "line_number": 114, "usage_type": "name"}, {"api_name": "address.Address", "line_number": 124, "usage_type": "call"}, {"api_name": "utils.create_x10_device", "line_number": 136, "usage_type": "call"}, {"api_name": "managers.saved_devices_manager.SavedDeviceManager", "line_number": 168, "usage_type": "call"}, {"api_name": "address.Address", "line_number": 185, "usage_type": "call"}, {"api_name": "managers.saved_devices_manager.SavedDeviceManager", "line_number": 194, "usage_type": "call"}, {"api_name": "device_id_manager.DeviceId", "line_number": 197, "usage_type": "name"}, {"api_name": "utils.create_device", "line_number": 202, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 213, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 214, "usage_type": "call"}]} +{"seq_id": "168291477", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\nfig = plt.figure( figsize =( 8 , 3 ))\r\nax1 = fig.add_subplot( 151 , projection = '3d' )\r\nax2 = fig.add_subplot( 152 , projection = '3d' )\r\nax3 = fig.add_subplot( 153 , projection = '3d' )\r\nax4 = fig.add_subplot( 154 , projection = '3d' )\r\nax5 = fig.add_subplot( 155 , projection = '3d' )\r\n\r\n_x = np.arange( 4 )\r\n_y = np.arange( 5 )\r\n_xx, _yy = np.meshgrid(_x, _y)\r\nx=_xx.ravel()\r\ny=_yy.ravel()\r\ntop = x + y\r\nbottom = np.zeros_like(top)\r\nwidth = depth = 1\r\nax1.bar3d(x, y, bottom, width, depth, top, shade = True ,color = 'b')\r\nax1.set_title('zacieniony')\r\nax2.bar3d(x, y, bottom, width, depth, top, shade = True , alpha = 0.5, color = 
'r')\r\nax2.set_title('przeswitujacy')\r\nax3.bar3d(x, y, bottom, width, depth, top, shade = False,color='g')\r\nax3.set_title('nie zacieniony')\r\nax4.bar3d(x, y, bottom, width, depth, top, shade = False,color = 'pink', edgecolor=['k','c','w'])\r\nax4.set_title('z krawiedziami')\r\nax5.bar3d(x, y, bottom, width, depth, top, shade = True, color = 'm', zsort='max')\r\nax5.set_title('z krawiedziami')\r\nplt.show()", "sub_path": "Tomasz_Maćkiewicz_155299_11/zadanie 4.py", "file_name": "zadanie 4.py", "file_ext": "py", "file_size_in_byte": 1130, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "320805683", "text": "\nimport os\nimport threading\nimport glob\nimport shutil\nfrom tqdm import tqdm\nimport multiprocessing\nimport cv2\n\nNUM_THREADS = 5\nVIDEO_ROOT = \"/4T/zhujian/dataset/hmdb51/\" # Downloaded webm videos\nFRAME_ROOT = \"/home/zhujian/dataset/hmdb51_frames/\" # Directory for extracted frames\n\n\ndef split(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\ndef extract(video_path, out_path, tmpl='%06d.jpg'):\n \n cmd = 'ffmpeg -i \\\"{}\\\" -threads 1 -vf scale=-1:256 -q:v 0 \\\"{}/%06d.jpg\\\" -loglevel error'.format(video_path,\n out_path)\n \n os.system(cmd)\n # video = cv2.VideoCapture(video_path)\n\n # i = 1\n # while 1:\n # flag, img = video.read()\n # if not flag:\n # break\n # cv2.imwrite(os.path.join(out_path,'%06d.jpg' % i), img)\n # i += 1\n # video.release()\n\ndef target(video_list):\n for video in tqdm(video_list):\n # name = video.split('.')[-2]\n video_classes = video\n a = glob.glob(os.path.join(VIDEO_ROOT, video_classes, '*'))\n for i in a:\n if os.path.isdir(i):\n continue\n name = i.split('/')[-1][:-4]\n # print(name)\n try:\n os.makedirs(os.path.join(FRAME_ROOT, video_classes, name))\n except:\n pass\n # print('exsit path %s' % os.path.join(FRAME_ROOT, video_classes, name) )\n\n # img_l = glob.glob(os.path.join(FRAME_ROOT, video_classes, name, '*'))\n extract(i, os.path.join(FRAME_ROOT, video_classes, name))\n\n\nif __name__ == '__main__':\n if not os.path.exists(VIDEO_ROOT):\n raise ValueError('Please download videos and set VIDEO_ROOT variable.')\n if not os.path.exists(FRAME_ROOT):\n os.makedirs(FRAME_ROOT)\n\n video_list = os.listdir(VIDEO_ROOT)\n print(len(video_list))\n target(video_list)\n ", "sub_path": "tools/vid2img_hmdb51.py", "file_name": "vid2img_hmdb51.py", "file_ext": "py", "file_size_in_byte": 2089, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.system", "line_number": 26, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 39, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 
42, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 62, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "118334232", "text": "import socket\nimport traceback\nimport logging\n\nfrom threading import Thread\nfrom ircbot.message import Message\n\n\nclass IRCClient(object):\n BUFFER_SIZE = 2048\n address = None\n nick = None\n realname = None\n command_hook = None\n command_prefix = None\n channels = None\n sock = None\n\n def _main_thread(self):\n logging.info(\"Connected\")\n self.send(Message(None, \"NICK\", [self.nick]))\n self.send(Message(None, \"USER\", [self.nick, \"0\", \"*\", self.realname]))\n if self.channels:\n for channel in self.channels:\n self.send(Message(None, \"JOIN\", [channel]))\n while True:\n data = b\"\"\n while True:\n recv = self.sock.recv(self.BUFFER_SIZE)\n if len(recv):\n data += recv\n if len(recv) < self.BUFFER_SIZE:\n break\n messages = data.split(b\"\\r\\n\")\n for msg in messages:\n if not len(msg):\n continue\n try:\n message = Message.parse(msg.decode(\"utf-8\"))\n logging.debug(\"Message received: %s\", repr(message))\n except (IndexError, ValueError):\n logging.warning(\"Unable to parse message: %s\", traceback.format_exc())\n source = message.prefix.nickname\n if message.command in [\"PRIVMSG\", \"NOTICE\"]:\n target = message.params[0]\n text = message.params[1]\n if text.startswith(self.command_prefix):\n self.command_hook(self, source, target, text.split(\" \"))\n elif message.command == \"PING\":\n self.send(Message(None, \"PONG\", message.params))\n\n def __init__(self, address, nick, realname=\"IRC Bot\", command_hook=None, command_prefix=\"!\", channels=None):\n self.address, self.nick, self.realname, self.command_hook, self.command_prefix, self.channels =\\\n address, nick, realname, command_hook, command_prefix, channels\n\n def start(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(self.address)\n main_thread = Thread(target=self._main_thread)\n main_thread.start()\n # main_thread.join()\n\n def send(self, msg):\n logging.debug(\"Message sent: %s\", repr(msg))\n tosend = msg.encode()\n self.sock.send(tosend)\n\n def privmsg(self, to, text):\n self.send(Message(None, \"PRIVMSG\", [to, text]))\n", "sub_path": "ircbot/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 2554, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.info", "line_number": 20, "usage_type": "call"}, {"api_name": "ircbot.message.Message", "line_number": 21, "usage_type": "call"}, {"api_name": "ircbot.message.Message", "line_number": 22, "usage_type": "call"}, {"api_name": 
"ircbot.message.Message", "line_number": 25, "usage_type": "call"}, {"api_name": "ircbot.message.Message.parse", "line_number": 39, "usage_type": "call"}, {"api_name": "ircbot.message.Message", "line_number": 39, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 42, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 42, "usage_type": "call"}, {"api_name": "ircbot.message.Message", "line_number": 50, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 57, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 57, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 57, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 64, "usage_type": "call"}, {"api_name": "ircbot.message.Message", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "277041197", "text": "from PyQt5 import QtCore, QtGui, QtWidgets\nimport os\nimport numpy as np\nimport cv2\nfrom sklearn.cluster import KMeans\nimport pickle\n\n# Global Variables\npictures = []\nredAverage = 128\nredRange = 15\ngreenAverage = 128\ngreenRange = 15\nblueAverage = 128\nblueRange = 15\nnum = 1\nindex = 0\n\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1110, 950)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n\n self.ogImg1 = QtWidgets.QLabel(self.centralwidget)\n self.ogImg1.setGeometry(QtCore.QRect(20, 20, 150, 150))\n self.ogImg1.setFrameShape(QtWidgets.QFrame.Box)\n self.ogImg1.setText(\"\")\n self.ogImg1.setObjectName(\"ogImg1\")\n\n self.exImg1 = QtWidgets.QLabel(self.centralwidget)\n self.exImg1.setGeometry(QtCore.QRect(190, 20, 150, 150))\n self.exImg1.setFrameShape(QtWidgets.QFrame.Box)\n self.exImg1.setText(\"\")\n self.exImg1.setObjectName(\"exImg1\")\n\n self.ogImg2 = QtWidgets.QLabel(self.centralwidget)\n self.ogImg2.setGeometry(QtCore.QRect(380, 20, 150, 150))\n self.ogImg2.setFrameShape(QtWidgets.QFrame.Box)\n self.ogImg2.setText(\"\")\n self.ogImg2.setObjectName(\"ogImg2\")\n\n self.exImg2 = QtWidgets.QLabel(self.centralwidget)\n self.exImg2.setGeometry(QtCore.QRect(550, 20, 150, 150))\n self.exImg2.setFrameShape(QtWidgets.QFrame.Box)\n self.exImg2.setText(\"\")\n self.exImg2.setObjectName(\"exImg2\")\n\n self.ogImg3 = QtWidgets.QLabel(self.centralwidget)\n self.ogImg3.setGeometry(QtCore.QRect(740, 20, 150, 150))\n self.ogImg3.setFrameShape(QtWidgets.QFrame.Box)\n self.ogImg3.setText(\"\")\n self.ogImg3.setObjectName(\"ogImg3\")\n\n self.exImg3 = QtWidgets.QLabel(self.centralwidget)\n self.exImg3.setGeometry(QtCore.QRect(910, 20, 150, 150))\n self.exImg3.setFrameShape(QtWidgets.QFrame.Box)\n self.exImg3.setText(\"\")\n self.exImg3.setObjectName(\"exImg3\")\n\n self.ogImg4 = QtWidgets.QLabel(self.centralwidget)\n self.ogImg4.setGeometry(QtCore.QRect(20, 220, 150, 150))\n self.ogImg4.setFrameShape(QtWidgets.QFrame.Box)\n self.ogImg4.setText(\"\")\n self.ogImg4.setObjectName(\"ogImg4\")\n\n self.exImg4 = QtWidgets.QLabel(self.centralwidget)\n self.exImg4.setGeometry(QtCore.QRect(190, 220, 150, 150))\n self.exImg4.setFrameShape(QtWidgets.QFrame.Box)\n self.exImg4.setText(\"\")\n self.exImg4.setObjectName(\"exImg4\")\n\n self.ogImg5 = QtWidgets.QLabel(self.centralwidget)\n self.ogImg5.setGeometry(QtCore.QRect(380, 220, 150, 
150))\n self.ogImg5.setFrameShape(QtWidgets.QFrame.Box)\n self.ogImg5.setText(\"\")\n self.ogImg5.setObjectName(\"ogImg5\")\n\n self.exImg5 = QtWidgets.QLabel(self.centralwidget)\n self.exImg5.setGeometry(QtCore.QRect(550, 220, 150, 150))\n self.exImg5.setFrameShape(QtWidgets.QFrame.Box)\n self.exImg5.setText(\"\")\n self.exImg5.setObjectName(\"exImg5\")\n\n self.ogImg6 = QtWidgets.QLabel(self.centralwidget)\n self.ogImg6.setGeometry(QtCore.QRect(740, 220, 150, 150))\n self.ogImg6.setFrameShape(QtWidgets.QFrame.Box)\n self.ogImg6.setText(\"\")\n self.ogImg6.setObjectName(\"ogImg6\")\n\n self.exImg6 = QtWidgets.QLabel(self.centralwidget)\n self.exImg6.setGeometry(QtCore.QRect(910, 220, 150, 150))\n self.exImg6.setFrameShape(QtWidgets.QFrame.Box)\n self.exImg6.setText(\"\")\n self.exImg6.setObjectName(\"exImg6\")\n\n self.ogImg9 = QtWidgets.QLabel(self.centralwidget)\n self.ogImg9.setGeometry(QtCore.QRect(740, 420, 150, 150))\n self.ogImg9.setFrameShape(QtWidgets.QFrame.Box)\n self.ogImg9.setText(\"\")\n self.ogImg9.setObjectName(\"ogImg9\")\n\n self.ogImg8 = QtWidgets.QLabel(self.centralwidget)\n self.ogImg8.setGeometry(QtCore.QRect(380, 420, 150, 150))\n self.ogImg8.setFrameShape(QtWidgets.QFrame.Box)\n self.ogImg8.setText(\"\")\n self.ogImg8.setObjectName(\"ogImg8\")\n\n self.exImg9 = QtWidgets.QLabel(self.centralwidget)\n self.exImg9.setGeometry(QtCore.QRect(910, 420, 150, 150))\n self.exImg9.setFrameShape(QtWidgets.QFrame.Box)\n self.exImg9.setText(\"\")\n self.exImg9.setObjectName(\"exImg9\")\n\n self.ogImg7 = QtWidgets.QLabel(self.centralwidget)\n self.ogImg7.setGeometry(QtCore.QRect(20, 420, 150, 150))\n self.ogImg7.setFrameShape(QtWidgets.QFrame.Box)\n self.ogImg7.setText(\"\")\n self.ogImg7.setObjectName(\"ogImg7\")\n\n self.exImg7 = QtWidgets.QLabel(self.centralwidget)\n self.exImg7.setGeometry(QtCore.QRect(190, 420, 150, 150))\n self.exImg7.setFrameShape(QtWidgets.QFrame.Box)\n self.exImg7.setText(\"\")\n self.exImg7.setObjectName(\"exImg7\")\n\n self.exImg8 = QtWidgets.QLabel(self.centralwidget)\n self.exImg8.setGeometry(QtCore.QRect(550, 420, 150, 150))\n self.exImg8.setFrameShape(QtWidgets.QFrame.Box)\n self.exImg8.setText(\"\")\n self.exImg8.setObjectName(\"exImg8\")\n\n self.commonColorLbl = QtWidgets.QLabel(self.centralwidget)\n self.commonColorLbl.setGeometry(QtCore.QRect(20, 610, 800, 150))\n self.commonColorLbl.setFrameShape(QtWidgets.QFrame.Box)\n self.commonColorLbl.setText(\"\")\n self.commonColorLbl.setObjectName(\"commonColorLbl\")\n\n self.commonColorSlider = QtWidgets.QSlider(self.centralwidget)\n self.commonColorSlider.setGeometry(QtCore.QRect(840, 670, 160, 22))\n self.commonColorSlider.setMinimum(1)\n self.commonColorSlider.setMaximum(5)\n self.commonColorSlider.setOrientation(QtCore.Qt.Horizontal)\n self.commonColorSlider.setObjectName(\"commonColorSlider\")\n\n self.commonColorValue = QtWidgets.QLabel(self.centralwidget)\n self.commonColorValue.setGeometry(QtCore.QRect(1020, 670, 50, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n self.commonColorValue.setFont(font)\n self.commonColorValue.setFrameShape(QtWidgets.QFrame.Box)\n self.commonColorValue.setText(\"\")\n self.commonColorValue.setAlignment(QtCore.Qt.AlignCenter)\n self.commonColorValue.setObjectName(\"commonColorValue\")\n\n self.redAverageValue = QtWidgets.QLabel(self.centralwidget)\n self.redAverageValue.setGeometry(QtCore.QRect(280, 810, 50, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n 
self.redAverageValue.setFont(font)\n self.redAverageValue.setFrameShape(QtWidgets.QFrame.Box)\n self.redAverageValue.setText(\"\")\n self.redAverageValue.setAlignment(QtCore.Qt.AlignCenter)\n self.redAverageValue.setObjectName(\"redAverageValue\")\n\n self.redRangeValue = QtWidgets.QLabel(self.centralwidget)\n self.redRangeValue.setGeometry(QtCore.QRect(280, 890, 50, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n self.redRangeValue.setFont(font)\n self.redRangeValue.setFrameShape(QtWidgets.QFrame.Box)\n self.redRangeValue.setText(\"\")\n self.redRangeValue.setAlignment(QtCore.Qt.AlignCenter)\n self.redRangeValue.setObjectName(\"redRangeValue\")\n\n self.redAverageText = QtWidgets.QLabel(self.centralwidget)\n self.redAverageText.setGeometry(QtCore.QRect(100, 780, 100, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n font.setBold(False)\n font.setWeight(50)\n self.redAverageText.setFont(font)\n self.redAverageText.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.redAverageText.setAlignment(QtCore.Qt.AlignCenter)\n self.redAverageText.setObjectName(\"redAverageText\")\n\n self.redAverageSlider = QtWidgets.QSlider(self.centralwidget)\n self.redAverageSlider.setGeometry(QtCore.QRect(20, 810, 241, 22))\n self.redAverageSlider.setMaximum(255)\n self.redAverageSlider.setSliderPosition(128)\n self.redAverageSlider.setOrientation(QtCore.Qt.Horizontal)\n self.redAverageSlider.setObjectName(\"redAverageSlider\")\n\n self.redRangeSlider = QtWidgets.QSlider(self.centralwidget)\n self.redRangeSlider.setGeometry(QtCore.QRect(20, 890, 241, 22))\n self.redRangeSlider.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.redRangeSlider.setMaximum(127)\n self.redRangeSlider.setProperty(\"value\", 0)\n self.redRangeSlider.setSliderPosition(0)\n self.redRangeSlider.setOrientation(QtCore.Qt.Horizontal)\n self.redRangeSlider.setObjectName(\"redRangeSlider\")\n\n self.redRangeText = QtWidgets.QLabel(self.centralwidget)\n self.redRangeText.setGeometry(QtCore.QRect(90, 860, 100, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n font.setBold(False)\n font.setWeight(50)\n self.redRangeText.setFont(font)\n self.redRangeText.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.redRangeText.setAlignment(QtCore.Qt.AlignCenter)\n self.redRangeText.setObjectName(\"redRangeText\")\n\n self.greenRangeValue = QtWidgets.QLabel(self.centralwidget)\n self.greenRangeValue.setGeometry(QtCore.QRect(630, 888, 50, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n self.greenRangeValue.setFont(font)\n self.greenRangeValue.setFrameShape(QtWidgets.QFrame.Box)\n self.greenRangeValue.setText(\"\")\n self.greenRangeValue.setAlignment(QtCore.Qt.AlignCenter)\n self.greenRangeValue.setObjectName(\"greenRangeValue\")\n\n self.greenAverageText = QtWidgets.QLabel(self.centralwidget)\n self.greenAverageText.setGeometry(QtCore.QRect(430, 778, 120, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n font.setBold(False)\n font.setWeight(50)\n self.greenAverageText.setFont(font)\n self.greenAverageText.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.greenAverageText.setAlignment(QtCore.Qt.AlignCenter)\n self.greenAverageText.setObjectName(\"greenAverageText\")\n\n self.greenAverageValue = QtWidgets.QLabel(self.centralwidget)\n self.greenAverageValue.setGeometry(QtCore.QRect(630, 810, 50, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n 
font.setPointSize(14)\n self.greenAverageValue.setFont(font)\n self.greenAverageValue.setFrameShape(QtWidgets.QFrame.Box)\n self.greenAverageValue.setText(\"\")\n self.greenAverageValue.setAlignment(QtCore.Qt.AlignCenter)\n self.greenAverageValue.setObjectName(\"greenAverageValue\")\n\n self.greenRangeText = QtWidgets.QLabel(self.centralwidget)\n self.greenRangeText.setGeometry(QtCore.QRect(440, 858, 100, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n font.setBold(False)\n font.setWeight(50)\n self.greenRangeText.setFont(font)\n self.greenRangeText.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.greenRangeText.setAlignment(QtCore.Qt.AlignCenter)\n self.greenRangeText.setObjectName(\"greenRangeText\")\n\n self.greenAverageSlider = QtWidgets.QSlider(self.centralwidget)\n self.greenAverageSlider.setGeometry(QtCore.QRect(370, 810, 241, 22))\n self.greenAverageSlider.setMaximum(255)\n self.greenAverageSlider.setProperty(\"value\", 128)\n self.greenAverageSlider.setOrientation(QtCore.Qt.Horizontal)\n self.greenAverageSlider.setObjectName(\"greenAverageSlider\")\n\n self.greenRangeSlider = QtWidgets.QSlider(self.centralwidget)\n self.greenRangeSlider.setGeometry(QtCore.QRect(370, 890, 241, 20))\n self.greenRangeSlider.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.greenRangeSlider.setMaximum(127)\n self.greenRangeSlider.setProperty(\"value\", 0)\n self.greenRangeSlider.setSliderPosition(0)\n self.greenRangeSlider.setOrientation(QtCore.Qt.Horizontal)\n self.greenRangeSlider.setObjectName(\"greenRangeSlider\")\n\n self.blueAverageValue = QtWidgets.QLabel(self.centralwidget)\n self.blueAverageValue.setGeometry(QtCore.QRect(970, 810, 50, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n self.blueAverageValue.setFont(font)\n self.blueAverageValue.setFrameShape(QtWidgets.QFrame.Box)\n self.blueAverageValue.setText(\"\")\n self.blueAverageValue.setAlignment(QtCore.Qt.AlignCenter)\n self.blueAverageValue.setObjectName(\"blueAverageValue\")\n\n self.blueRangeSlider = QtWidgets.QSlider(self.centralwidget)\n self.blueRangeSlider.setGeometry(QtCore.QRect(710, 890, 241, 22))\n self.blueRangeSlider.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.blueRangeSlider.setMaximum(127)\n self.blueRangeSlider.setSliderPosition(0)\n self.blueRangeSlider.setOrientation(QtCore.Qt.Horizontal)\n self.blueRangeSlider.setObjectName(\"blueRangeSlider\")\n\n self.blueAverageSlider = QtWidgets.QSlider(self.centralwidget)\n self.blueAverageSlider.setGeometry(QtCore.QRect(710, 810, 241, 22))\n self.blueAverageSlider.setMaximum(255)\n self.blueAverageSlider.setProperty(\"value\", 128)\n self.blueAverageSlider.setOrientation(QtCore.Qt.Horizontal)\n self.blueAverageSlider.setObjectName(\"blueAverageSlider\")\n\n self.blueRangeText = QtWidgets.QLabel(self.centralwidget)\n self.blueRangeText.setGeometry(QtCore.QRect(780, 860, 100, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n font.setBold(False)\n font.setWeight(50)\n self.blueRangeText.setFont(font)\n self.blueRangeText.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.blueRangeText.setAlignment(QtCore.Qt.AlignCenter)\n self.blueRangeText.setObjectName(\"blueRangeText\")\n\n self.blueRangeValue = QtWidgets.QLabel(self.centralwidget)\n self.blueRangeValue.setGeometry(QtCore.QRect(970, 890, 50, 20))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n self.blueRangeValue.setFont(font)\n 
self.blueRangeValue.setFrameShape(QtWidgets.QFrame.Box)\n        self.blueRangeValue.setText(\"\")\n        self.blueRangeValue.setAlignment(QtCore.Qt.AlignCenter)\n        self.blueRangeValue.setObjectName(\"blueRangeValue\")\n\n        self.blueAverageText = QtWidgets.QLabel(self.centralwidget)\n        self.blueAverageText.setGeometry(QtCore.QRect(780, 780, 100, 20))\n        font = QtGui.QFont()\n        font.setFamily(\"Times New Roman\")\n        font.setPointSize(14)\n        font.setBold(False)\n        font.setWeight(50)\n        self.blueAverageText.setFont(font)\n        self.blueAverageText.setFrameShape(QtWidgets.QFrame.NoFrame)\n        self.blueAverageText.setAlignment(QtCore.Qt.AlignCenter)\n        self.blueAverageText.setObjectName(\"blueAverageText\")\n\n        self.author = QtWidgets.QLabel(self.centralwidget)\n        self.author.setGeometry(QtCore.QRect(1000, 920, 101, 20))\n        font = QtGui.QFont()\n        font.setFamily(\"Times New Roman\")\n        font.setPointSize(10)\n        self.author.setFont(font)\n        self.author.setObjectName(\"author\")\n\n        MainWindow.setCentralWidget(self.centralwidget)\n\n        self.retranslateUi(MainWindow)\n        QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n\n        self.timer = QtCore.QTimer()\n\n        self.readFile()\n\n        # Timer\n        self.timer.timeout.connect(self.update)\n        self.timer.start(10)\n\n        # Mouse\n        self.commonColorLbl.mousePressEvent = self.captureCommon\n        \n        self.exImg1.mousePressEvent = self.img1Select\n        self.ogImg1.mousePressEvent = self.img1Select\n\n        self.exImg2.mousePressEvent = self.img2Select\n        self.ogImg2.mousePressEvent = self.img2Select\n\n        self.exImg3.mousePressEvent = self.img3Select\n        self.ogImg3.mousePressEvent = self.img3Select\n\n        self.exImg4.mousePressEvent = self.img4Select\n        self.ogImg4.mousePressEvent = self.img4Select\n\n        self.exImg5.mousePressEvent = self.img5Select\n        self.ogImg5.mousePressEvent = self.img5Select\n\n        self.exImg6.mousePressEvent = self.img6Select\n        self.ogImg6.mousePressEvent = self.img6Select\n\n        self.exImg7.mousePressEvent = self.img7Select\n        self.ogImg7.mousePressEvent = self.img7Select\n\n        self.exImg8.mousePressEvent = self.img8Select\n        self.ogImg8.mousePressEvent = self.img8Select\n\n        self.exImg9.mousePressEvent = self.img9Select\n        self.ogImg9.mousePressEvent = self.img9Select\n\n        # Slider\n        self.redAverageSlider.valueChanged.connect(self.redAverageUpdate)\n        self.redRangeSlider.valueChanged.connect(self.redRangeUpdate)\n        self.greenAverageSlider.valueChanged.connect(self.greenAverageUpdate)\n        self.greenRangeSlider.valueChanged.connect(self.greenRangeUpdate)\n        self.blueAverageSlider.valueChanged.connect(self.blueAverageUpdate)\n        self.blueRangeSlider.valueChanged.connect(self.blueRangeUpdate)\n        self.commonColorSlider.valueChanged.connect(self.commonColorNumUpdate)\n\n    \n    def readFile(self):\n        \"\"\" \n        Before the whole GUI is launched, this function is triggered. \n        It reads every image file contained inside the OG folder, \n        then saves a 150 x 150 copy inside the images folder. \n        After that, it calculates the top 5 most common colors and their proportions. \n        Throughout the function, all the data is saved inside the pictures global variable. \n        Once all the data is complete, it is saved into a pickle file to reduce the load time of the next run. \n        Last, when an image is added or removed, this is meant to be picked up automatically. 
\n\n        Related Function:\n            analyzeColor(filePath, ind)\n        \"\"\"\n        global pictures\n        new = False\n        ogObjects = [self.ogImg1, self.ogImg2, self.ogImg3, self.ogImg4, self.ogImg5, self.ogImg6, self.ogImg7, self.ogImg8, self.ogImg9]\n        path = os.path.abspath(os.getcwd()) + \"\\OG\"\n\n        # If these folder/file do not exist, create them\n        if not os.path.exists('images'):\n            os.makedirs('images')\n        if not os.path.exists('Pic.pickle'):\n            open(\"Pic.pickle\", \"x\").close()\n            new = True\n        elif os.path.getsize('Pic.pickle') == 0:\n            open(\"Pic.pickle\", \"w\").close()\n            new = True\n        else:\n            # if the file exists, open it and load the data \n            old_file = open(\"Pic.pickle\", 'rb')\n            pictures = pickle.load(old_file)\n            old_file.close()\n\n        for i, filename in enumerate(os.listdir(path)):\n            # only runs on the first cold start with no data\n            if new:\n                img = cv2.imread(os.path.join(path, filename))\n                imgPath = os.path.abspath(os.getcwd()) + f\"\\images\\{filename + str(i)}.png\"\n                dsize = (150,150)\n                output = cv2.resize(img, dsize)\n                cv2.imwrite(imgPath, output)\n                pictures.append([])\n                pictures[i].append(os.path.join(path, filename))\n                pictures[i].append(imgPath)\n                self.analyzeColor(imgPath, i)\n\n\n        # Save the updated data into the pickle\n        old_file = open(\"Pic.pickle\", 'wb')\n        pickle.dump(pictures, old_file)\n        old_file.close()\n\n        for j in range (9):\n            pixmap = QtGui.QPixmap(pictures[j][1])\n            self.widget = ogObjects[j]\n            self.widget.setPixmap(pixmap)\n            self.widget.setAlignment(QtCore.Qt.AlignLeft)\n\n    def analyzeColor(self, filePath, ind):\n        \"\"\"\n        This will analyze the top 5 common colors for a certain image.\n        It will store the proportion values and color codes inside the global variable pictures.\n\n        Args:\n            filePath: the directory of the image file, String\n            ind: index of the pictures list, Int\n\n        Related Functions:\n            make_histogram(cluster) \n        \"\"\"\n        global pictures\n        # read the image\n        img = cv2.imread(filePath)\n\n        # Flatten the image into a single row of pixels\n        height, width, _ = np.shape(img)\n        image = img.reshape((height * width, 3))\n\n        # Set the cluster count to 5 since we are calculating 5 of them\n        clusters = KMeans(n_clusters=5)\n\n        # Find the mid points of each nearest cluster point\n        clusters.fit(image)\n\n        # Count the frequencies/proportion of each color\n        histogram = self.make_histogram(clusters)\n\n        # Combine and sort from most to least frequent\n        ordered = zip(histogram, clusters.cluster_centers_)\n        ordered = sorted(ordered, key=lambda x: x[0], reverse = True)\n\n        # Append two empty lists that will store the colors and proportion values\n        pictures[ind].append([])\n        pictures[ind].append([])\n\n        # Add the colors and proportion values to each newly appended list\n        for index, row in enumerate(ordered):\n            pictures[ind][2].append((int(row[1][2]), int(row[1][1]), int(row[1][0])))\n            pictures[ind][3].append('%.2f'%(row[0] * 100))\n\n    def make_histogram(self, clusters):\n        \"\"\"\n        Count the number of pixels in each cluster\n\n        Args:\n            clusters: The fitted KMeans clustering\n\n        Returns:\n            A numpy histogram\n        \"\"\"\n        numLabels = np.arange(0, len(np.unique(clusters.labels_)) + 1)\n        hist, _ = np.histogram(clusters.labels_, bins = numLabels)\n        hist = hist.astype('float32')\n        hist /= hist.sum()\n        return hist\n\n\n    def commonColorNumUpdate(self):\n        \"\"\"\n        This function will update the value of num when the commonColorSlider is moved.\n        \"\"\"\n        global num\n        num = self.commonColorSlider.sliderPosition()\n    \n    def redRangeUpdate(self):\n        \"\"\"\n        This function will update the value of redRange 
when the redRangeSlider is moved\n        \"\"\"\n        global redRange\n        redRange = self.redRangeSlider.sliderPosition()\n\n    def redAverageUpdate(self):\n        \"\"\"\n        This function will update the value of redAverage when the redAverageSlider is moved\n        \"\"\"\n        global redAverage\n        redAverage = self.redAverageSlider.sliderPosition()\n\n    def greenRangeUpdate(self):\n        \"\"\"\n        This function will update the value of greenRange when the greenRangeSlider is moved\n        \"\"\"\n        global greenRange\n        greenRange = self.greenRangeSlider.sliderPosition()\n\n    def greenAverageUpdate(self):\n        \"\"\"\n        This function will update the value of greenAverage when the greenAverageSlider is moved\n        \"\"\"\n        global greenAverage\n        greenAverage = self.greenAverageSlider.sliderPosition()\n\n    def blueRangeUpdate(self):\n        \"\"\"\n        This function will update the value of blueRange when the blueRangeSlider is moved\n        \"\"\"\n        global blueRange\n        blueRange = self.blueRangeSlider.sliderPosition()\n\n    def blueAverageUpdate(self):\n        \"\"\"\n        This function will update the value of blueAverage when the blueAverageSlider is moved\n        \"\"\"\n        global blueAverage\n        blueAverage = self.blueAverageSlider.sliderPosition()\n    \n    def update(self):\n        \"\"\"\n        This function will be (almost) continuously running when the GUI is up.\n        When the user changes any values, it automatically updates the text values and the functions that use those values.\n        It also updates the preview color when the user selects a certain location on an image.\n\n        Related Functions:\n            colorDetect()\n            showCommonColor()\n        \"\"\"\n        \n        global redAverage, redRange, greenAverage, greenRange, blueAverage, blueRange, num, index\n\n        # Update the Text Value\n        self.redAverageValue.setNum(redAverage)\n        self.redRangeValue.setNum(redRange)\n        self.greenAverageValue.setNum(greenAverage)\n        self.greenRangeValue.setNum(greenRange)\n        self.blueAverageValue.setNum(blueAverage)\n        self.blueRangeValue.setNum(blueRange)\n        self.commonColorValue.setNum(num)\n\n        # Update the Slider Position\n        self.redAverageSlider.setSliderPosition(redAverage)\n        self.redRangeSlider.setSliderPosition(redRange)\n        self.greenAverageSlider.setSliderPosition(greenAverage)\n        self.greenRangeSlider.setSliderPosition(greenRange)\n        self.blueAverageSlider.setSliderPosition(blueAverage)\n        self.blueRangeSlider.setSliderPosition(blueRange)\n\n        self.colorDetect()\n        self.showCommonColor(index)\n        \n\n\n    def captureCommon(self, event):\n        \"\"\"\n        Same idea as the captureIt function, but for the common color image.\n        Also, it does not remember the x and y values since they are not important here.\n\n        Args:\n            event: event variable to capture the x and y values\n\n        \"\"\"\n        global redAverage, blueAverage, greenAverage\n        # Get the x and y coordinates from the event input\n        xValue = event.pos().x()\n        yValue = event.pos().y()\n\n        # Get the color as a list via PyQt5 from the common-colors image\n        qImg = QtGui.QImage(\"common\\Common_Colors.png\")\n        c = qImg.pixel(xValue, yValue)\n        colors = QtGui.QColor(c).getRgb()\n\n        # Store the colors as global variables\n        redAverage = colors[0]\n        greenAverage = colors[1]\n        blueAverage = colors[2]\n\n\n    def make_bar(self, color):\n        \"\"\"\n        Create an image of a given color\n\n        Args:\n            color: (R, G, B) values of the color (converted below to cv2's BGR order)\n\n        Returns:\n            the bar image as a numpy array\n        \"\"\"\n        bar = np.zeros((150, 160, 3), np.uint8)\n        bar[:] = [color[2], color[1], color[0]]\n        return bar\n\n    def img1Select(self, event):\n        global index\n        index = 0\n\n    def img2Select(self, event):\n        global index\n        index = 
1\n\n    def img3Select(self, event):\n        global index\n        index = 2\n\n    def img4Select(self, event):\n        global index\n        index = 3\n\n    def img5Select(self, event):\n        global index\n        index = 4\n\n    def img6Select(self, event):\n        global index\n        index = 5\n\n    def img7Select(self, event):\n        global index\n        index = 6\n\n    def img8Select(self, event):\n        global index\n        index = 7\n\n    def img9Select(self, event):\n        global index\n        index = 8\n\n\n    def showCommonColor(self, index):\n        \"\"\"\n        Display the common colors for the image that is currently displayed.\n        \"\"\"\n        global pictures, num\n        # Create an empty list\n        bars = []\n\n        for i in range(num):\n            # Create a bar and append it to the bars list\n            bar = self.make_bar(pictures[index][2][i])\n            bars.append(bar)\n\n        # Make an image as a horizontal stack of the bars appended to the bars list\n        img = np.hstack(bars)\n        \n        for j in range(num):\n            # If the color is too dark (a.k.a black), display the text color as white, otherwise black\n            if (pictures[index][2][j][0] <= 100) and (pictures[index][2][j][1] <= 100) and (pictures[index][2][j][2] <= 100):\n                textColor = (255, 255, 255)\n            else:\n                textColor = (0,0,0) \n            \n            # Add the color code on the first line of each bar and Percentage of the color on the second line\n            img = cv2.putText(img, str(pictures[index][2][j]), (5 + (j * 160), 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, textColor, 1, cv2.LINE_AA)\n            img = cv2.putText(img, str(pictures[index][3][j]) + \"%\", (5+ (j * 160) , 40 ), cv2.FONT_HERSHEY_TRIPLEX, 0.5, textColor, 1, cv2.LINE_AA)\n        \n        # If the folder common does not exist, create one\n        if not os.path.exists('common'):\n            os.makedirs('common')\n        \n        # Create an image file with the common colors \n        cv2.imwrite(\"common\\Common_Colors.png\", img)\n        fileName = \"common\\Common_Colors.png\"\n\n        # Display the common colors on the GUI\n        pixmap = QtGui.QPixmap(fileName)\n        self.commonColorLbl.setPixmap(pixmap)\n        self.commonColorLbl.setAlignment(QtCore.Qt.AlignLeft)\n\n\n    def colorDetect(self):\n        \"\"\"\n        Calculate the min and max for RGB with the Average and Range values.\n        At the same time, if the user hits a limit, it will adjust to the max or min value.\n        Also, when the user selects 128 for the average value and 127 for the range, it will automatically switch the min value to 0 even though it is technically 1.\n        Once the calculations are complete, it will trigger the color_detect function.\n        \n        Related Function:\n            color_detect(redMin, redMax, greenMin, greenMax, blueMin, blueMax)\n        \"\"\"\n        global redAverage, redRange, greenAverage, greenRange, blueAverage, blueRange\n        \n        redMin = redAverage - redRange\n        if (redMin < 0) or (redAverage == 128 and redRange == 127):\n            redMin = 0\n        redMax = redAverage + redRange\n        if (redMax > 255):\n            redMax = 255\n        greenMin = greenAverage - greenRange\n        if (greenMin < 0) or (greenAverage == 128 and greenRange == 127):\n            greenMin = 0\n        greenMax = greenAverage + greenRange\n        if (greenMax > 255):\n            greenMax = 255\n        blueMin = blueAverage - blueRange\n        if (blueMin < 0) or (blueAverage == 128 and blueRange == 127):\n            blueMin = 0\n        blueMax = blueAverage + blueRange\n        if (blueMax > 255):\n            blueMax = 255\n\n        self.color_detect(redMin, redMax, greenMin, greenMax, blueMin, blueMax)\n\n    def color_detect(self, redMin, redMax, greenMin, greenMax, blueMin, blueMax):\n        \"\"\"\n        Show the image color if the color is in the range that the user wants to see\n        If it is not, it will show as (0,255,255): skyblue\n        \n        Args:\n            redMin: Red Min value from Red Average - Red Range, Int\n            redMax: Red Max value from Red Average + Red Range, Int\n            
greenMin: Green Min value from Green Average - Green Range, Int\n            greenMax: Green Max value from Green Average + Green Range, Int\n            blueMin: Blue Min value from Blue Average - Blue Range, Int\n            blueMax: Blue Max value from Blue Average + Blue Range, Int\n        \n        \"\"\"\n        global pictures\n        exObjects = [self.exImg1, self.exImg2, self.exImg3, self.exImg4, self.exImg5, self.exImg6, self.exImg7, self.exImg8, self.exImg9]\n        for i in range(9):\n            # Read the image\n            img = cv2.imread(pictures[i][1])\n\n            # Set numpy arrays for the lower and upper bounds (note the BGR channel order used by OpenCV)\n            Lower = np.array([blueMin, greenMin, redMin], dtype = \"uint8\")\n            Upper = np.array([blueMax, greenMax, redMax], dtype = \"uint8\")\n            \n            # Keep the pixels whose color is in range; the masked output turns everything else black\n            mask = cv2.inRange(img, Lower, Upper)\n            output = cv2.bitwise_and(img, img, mask = mask)\n\n            # Copy the original image so the background can be recolored\n            newBackground = img.copy()\n\n            # Recolor the out-of-range background; (255, 255, 0) in (B, G, R) order is the sky blue described in the docstring\n            newBackground[mask == 0] = (255,255,0)\n            \n            # Write the result to a temporary image file\n            cv2.imwrite(\"Detection.png\", newBackground)\n            fileName = \"Detection.png\"\n\n            # Display the image file on the GUI\n            pixmap = QtGui.QPixmap(fileName)\n            self.executeImgLbl = exObjects[i]\n            self.executeImgLbl.setPixmap(pixmap)\n            self.executeImgLbl.setAlignment(QtCore.Qt.AlignLeft)\n\n            # Remove the temporary image file\n            os.remove(\"Detection.png\")\n\n\n    def retranslateUi(self, MainWindow):\n        _translate = QtCore.QCoreApplication.translate\n        MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n        self.redAverageText.setText(_translate(\"MainWindow\", \"Red Average\"))\n        self.redRangeText.setText(_translate(\"MainWindow\", \"Red Range\"))\n        self.greenAverageText.setText(_translate(\"MainWindow\", \"Green Average\"))\n        self.greenRangeText.setText(_translate(\"MainWindow\", \"Green Range\"))\n        self.blueRangeText.setText(_translate(\"MainWindow\", \"Blue Range\"))\n        self.blueAverageText.setText(_translate(\"MainWindow\", \"Blue Average\"))\n        self.author.setText(_translate(\"MainWindow\", \"By Kenneth Kang\"))\n\n\nif __name__ == \"__main__\":\n    import sys\n    app = QtWidgets.QApplication(sys.argv)\n    MainWindow = QtWidgets.QMainWindow()\n    ui = Ui_MainWindow()\n    ui.setupUi(MainWindow)\n    MainWindow.show()\n    sys.exit(app.exec_())\n", "sub_path": "Archives/Full Version/Day 16, Version 7/Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 33175, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 25, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 25, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 28, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 29, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 30, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 30, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 34, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 35, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 
36, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 36, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 41, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 42, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 42, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 46, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 47, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 47, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 48, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 48, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 52, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 53, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 53, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 54, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 58, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 58, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 59, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 59, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 60, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 60, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 64, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 64, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 65, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 66, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 66, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 70, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 70, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 71, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 71, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 72, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 72, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 76, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 76, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 77, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 77, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 78, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 82, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 82, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 
83, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 83, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 84, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 84, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 88, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 88, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 89, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 89, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 90, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 90, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 94, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 94, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 95, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 95, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 96, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 96, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 100, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 100, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 101, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 101, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 102, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 102, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 106, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 106, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 107, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 107, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 108, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 108, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 112, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 112, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 113, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 113, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 114, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 114, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 118, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 118, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 119, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 119, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 120, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 120, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 124, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 124, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 125, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 125, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 126, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 126, "usage_type": "name"}, {"api_name": 
"PyQt5.QtWidgets.QLabel", "line_number": 130, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 130, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 131, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 131, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 132, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 132, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 136, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 136, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 137, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 137, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 138, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 138, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 142, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 142, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 143, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 143, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 146, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 146, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 149, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 149, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 150, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 150, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 151, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 151, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 155, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 155, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 157, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 157, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 160, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 160, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 161, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 161, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 162, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 162, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 166, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 166, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 168, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 168, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 171, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 171, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 172, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 172, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 173, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 173, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 177, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 177, "usage_type": "name"}, {"api_name": 
"PyQt5.QtCore.Qt", "line_number": 179, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 179, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 182, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 182, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 183, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 183, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 184, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 184, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 190, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 190, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 191, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 191, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 194, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 194, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 195, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 195, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 198, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 198, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 201, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 201, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 202, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 202, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 203, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 203, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 207, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 207, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 210, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 210, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 211, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 211, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 212, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 212, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 218, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 218, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 219, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 219, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 222, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 222, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 223, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 223, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 224, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 224, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 228, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 228, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 230, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 230, "usage_type": "name"}, {"api_name": 
"PyQt5.QtWidgets.QLabel", "line_number": 233, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 233, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 234, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 234, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 235, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 235, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 241, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 241, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 242, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 242, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 245, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 245, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 246, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 246, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 247, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 247, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 251, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 251, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 253, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 253, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 256, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 256, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 257, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 257, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 258, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 258, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 264, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 264, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 265, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 265, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 268, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 268, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 269, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 269, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 272, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 272, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 275, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 275, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 276, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 276, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 277, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 277, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 281, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 281, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 284, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 284, "usage_type": "name"}, {"api_name": 
"PyQt5.QtCore.QRect", "line_number": 285, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 285, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 286, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 286, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 290, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 290, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 292, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 292, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 295, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 295, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 296, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 296, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 297, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 297, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 300, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 300, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSlider", "line_number": 303, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 303, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 304, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 304, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 307, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 307, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 310, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 310, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 311, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 311, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 312, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 312, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 318, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 318, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 319, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 319, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 322, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 322, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 323, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 323, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 324, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 324, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 328, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 328, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 330, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 330, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 333, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 333, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 334, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 334, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", 
"line_number": 335, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 335, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 341, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 341, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 342, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 342, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 345, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 345, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 346, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 346, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 347, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 347, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QMetaObject.connectSlotsByName", "line_number": 356, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QMetaObject", "line_number": 356, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 356, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 359, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 359, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 423, "usage_type": "call"}, {"api_name": "os.path", "line_number": 423, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 423, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 426, "usage_type": "call"}, {"api_name": "os.path", "line_number": 426, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 427, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 428, "usage_type": "call"}, {"api_name": "os.path", "line_number": 428, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 431, "usage_type": "call"}, {"api_name": "os.path", "line_number": 431, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 437, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 440, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 443, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 443, "usage_type": "call"}, {"api_name": "os.path", "line_number": 443, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 444, "usage_type": "call"}, {"api_name": "os.path", "line_number": 444, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 444, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 446, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 447, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 449, "usage_type": "call"}, {"api_name": "os.path", "line_number": 449, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 456, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 460, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 460, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 463, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 463, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 479, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 482, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 486, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 517, "usage_type": "call"}, 
{"api_name": "numpy.unique", "line_number": 517, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 518, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 623, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 623, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 625, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 625, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 643, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 643, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 698, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 708, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_TRIPLEX", "line_number": 708, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 708, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 709, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_TRIPLEX", "line_number": 709, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 709, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 712, "usage_type": "call"}, {"api_name": "os.path", "line_number": 712, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 713, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 716, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 720, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 720, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 722, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 722, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 776, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 779, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 780, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 783, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 784, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 793, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 797, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 797, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 800, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 800, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 803, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QCoreApplication", "line_number": 807, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 807, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 820, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 820, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 820, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 821, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 821, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 825, "usage_type": "call"}]} +{"seq_id": "524120455", "text": "import boto3\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--stage\")\n\nargs = parser.parse_args()\n\nlambda_client = boto3.client('lambda')\n\nlambda_client.invoke(FunctionName='dp-core-db-migrator-' + args.stage + '-migrate', InvocationType='Event')\n", "sub_path": "migrate.py", "file_name": "migrate.py", "file_ext": "py", 
"file_size_in_byte": 273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 4, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "456402857", "text": "\"\"\"Python requirements management\"\"\"\nfrom __future__ import annotations\n\nimport base64\nimport dataclasses\nimport json\nimport os\nimport re\nimport typing as t\n\nfrom .constants import (\n COVERAGE_REQUIRED_VERSION,\n)\n\nfrom .encoding import (\n to_text,\n to_bytes,\n)\n\nfrom .io import (\n read_text_file,\n)\n\nfrom .util import (\n ANSIBLE_TEST_DATA_ROOT,\n ANSIBLE_TEST_TARGET_ROOT,\n ANSIBLE_TEST_TOOLS_ROOT,\n ApplicationError,\n SubprocessError,\n display,\n find_executable,\n raw_command,\n str_to_version,\n version_to_str,\n)\n\nfrom .util_common import (\n check_pyyaml,\n create_result_directories,\n)\n\nfrom .config import (\n EnvironmentConfig,\n IntegrationConfig,\n UnitsConfig,\n)\n\nfrom .data import (\n data_context,\n)\n\nfrom .host_configs import (\n PosixConfig,\n PythonConfig,\n)\n\nfrom .connections import (\n LocalConnection,\n Connection,\n)\n\nQUIET_PIP_SCRIPT_PATH = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'quiet_pip.py')\nREQUIREMENTS_SCRIPT_PATH = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'requirements.py')\n\n\n# Pip Abstraction\n\n\nclass PipUnavailableError(ApplicationError):\n \"\"\"Exception raised when pip is not available.\"\"\"\n def __init__(self, python): # type: (PythonConfig) -> None\n super().__init__(f'Python {python.version} at \"{python.path}\" does not have pip available.')\n\n\n@dataclasses.dataclass(frozen=True)\nclass PipCommand:\n \"\"\"Base class for pip commands.\"\"\"\"\"\n\n def serialize(self): # type: () -> t.Tuple[str, t.Dict[str, t.Any]]\n \"\"\"Return a serialized representation of this command.\"\"\"\n name = type(self).__name__[3:].lower()\n return name, self.__dict__\n\n\n@dataclasses.dataclass(frozen=True)\nclass PipInstall(PipCommand):\n \"\"\"Details required to perform a pip install.\"\"\"\n requirements: t.List[t.Tuple[str, str]]\n constraints: t.List[t.Tuple[str, str]]\n packages: t.List[str]\n\n def has_package(self, name): # type: (str) -> bool\n \"\"\"Return True if the specified package will be installed, otherwise False.\"\"\"\n name = name.lower()\n\n return (any(name in package.lower() for package in self.packages) or\n any(name in contents.lower() for path, contents in self.requirements))\n\n\n@dataclasses.dataclass(frozen=True)\nclass PipUninstall(PipCommand):\n \"\"\"Details required to perform a pip uninstall.\"\"\"\n packages: t.List[str]\n ignore_errors: bool\n\n\n@dataclasses.dataclass(frozen=True)\nclass PipVersion(PipCommand):\n \"\"\"Details required to get the pip version.\"\"\"\n\n\n@dataclasses.dataclass(frozen=True)\nclass PipBootstrap(PipCommand):\n \"\"\"Details required to bootstrap pip.\"\"\"\n pip_version: str\n packages: t.List[str]\n\n\n# Entry Points\n\n\ndef install_requirements(\n args, # type: EnvironmentConfig\n python, # type: PythonConfig\n ansible=False, # type: bool\n command=False, # type: bool\n coverage=False, # type: bool\n virtualenv=False, # type: bool\n controller=True, # type: bool\n connection=None, # type: t.Optional[Connection]\n): # type: (...) 
-> None\n \"\"\"Install requirements for the given Python using the specified arguments.\"\"\"\n create_result_directories(args)\n\n if not requirements_allowed(args, controller):\n return\n\n if command and isinstance(args, (UnitsConfig, IntegrationConfig)) and args.coverage:\n coverage = True\n\n cryptography = False\n\n if ansible:\n try:\n ansible_cache = install_requirements.ansible_cache # type: ignore[attr-defined]\n except AttributeError:\n ansible_cache = install_requirements.ansible_cache = {} # type: ignore[attr-defined]\n\n ansible_installed = ansible_cache.get(python.path)\n\n if ansible_installed:\n ansible = False\n else:\n ansible_cache[python.path] = True\n\n # Install the latest cryptography version that the current requirements can support if it is not already available.\n # This avoids downgrading cryptography when OS packages provide a newer version than we are able to install using pip.\n # If not installed here, later install commands may try to install a version of cryptography which cannot be installed.\n cryptography = not is_cryptography_available(python.path)\n\n commands = collect_requirements(\n python=python,\n controller=controller,\n ansible=ansible,\n cryptography=cryptography,\n command=args.command if command else None,\n coverage=coverage,\n virtualenv=virtualenv,\n minimize=False,\n sanity=None,\n )\n\n if not commands:\n return\n\n run_pip(args, python, commands, connection)\n\n # false positive: pylint: disable=no-member\n if any(isinstance(command, PipInstall) and command.has_package('pyyaml') for command in commands):\n check_pyyaml(python)\n\n\ndef collect_bootstrap(python): # type: (PythonConfig) -> t.List[PipCommand]\n \"\"\"Return the details necessary to bootstrap pip into an empty virtual environment.\"\"\"\n infrastructure_packages = get_venv_packages(python)\n pip_version = infrastructure_packages['pip']\n packages = [f'{name}=={version}' for name, version in infrastructure_packages.items()]\n\n bootstrap = PipBootstrap(\n pip_version=pip_version,\n packages=packages,\n )\n\n return [bootstrap]\n\n\ndef collect_requirements(\n python, # type: PythonConfig\n controller, # type: bool\n ansible, # type: bool\n cryptography, # type: bool\n coverage, # type: bool\n virtualenv, # type: bool\n minimize, # type: bool\n command, # type: t.Optional[str]\n sanity, # type: t.Optional[str]\n): # type: (...) 
-> t.List[PipCommand]\n \"\"\"Collect requirements for the given Python using the specified arguments.\"\"\"\n commands = [] # type: t.List[PipCommand]\n\n if virtualenv:\n # sanity tests on Python 2.x install virtualenv when it is too old or is not already installed and the `--requirements` option is given\n # the last version of virtualenv with no dependencies is used to minimize the changes made outside a virtual environment\n commands.extend(collect_package_install(packages=['virtualenv==16.7.12'], constraints=False))\n\n if coverage:\n commands.extend(collect_package_install(packages=[f'coverage=={COVERAGE_REQUIRED_VERSION}'], constraints=False))\n\n if cryptography:\n commands.extend(collect_package_install(packages=get_cryptography_requirements(python)))\n\n if ansible or command:\n commands.extend(collect_general_install(command, ansible))\n\n if sanity:\n commands.extend(collect_sanity_install(sanity))\n\n if command == 'units':\n commands.extend(collect_units_install())\n\n if command in ('integration', 'windows-integration', 'network-integration'):\n commands.extend(collect_integration_install(command, controller))\n\n if (sanity or minimize) and any(isinstance(command, PipInstall) for command in commands):\n # bootstrap the managed virtual environment, which will have been created without any installed packages\n # sanity tests which install no packages skip this step\n commands = collect_bootstrap(python) + commands\n\n # most infrastructure packages can be removed from sanity test virtual environments after they've been created\n # removing them reduces the size of environments cached in containers\n uninstall_packages = list(get_venv_packages(python))\n\n if not minimize:\n # installed packages may have run-time dependencies on setuptools\n uninstall_packages.remove('setuptools')\n\n commands.extend(collect_uninstall(packages=uninstall_packages))\n\n return commands\n\n\ndef run_pip(\n args, # type: EnvironmentConfig\n python, # type: PythonConfig\n commands, # type: t.List[PipCommand]\n connection, # type: t.Optional[Connection]\n): # type: (...) -> None\n \"\"\"Run the specified pip commands for the given Python, and optionally the specified host.\"\"\"\n connection = connection or LocalConnection(args)\n script = prepare_pip_script(commands)\n\n if not args.explain:\n try:\n connection.run([python.path], data=script, capture=False)\n except SubprocessError:\n script = prepare_pip_script([PipVersion()])\n\n try:\n connection.run([python.path], data=script, capture=True)\n except SubprocessError as ex:\n if 'pip is unavailable:' in ex.stdout + ex.stderr:\n raise PipUnavailableError(python)\n\n raise\n\n\n# Collect\n\n\ndef collect_general_install(\n command=None, # type: t.Optional[str]\n ansible=False, # type: bool\n): # type: (...) 
-> t.List[PipInstall]\n \"\"\"Return details necessary for the specified general-purpose pip install(s).\"\"\"\n requirements_paths = [] # type: t.List[t.Tuple[str, str]]\n constraints_paths = [] # type: t.List[t.Tuple[str, str]]\n\n if ansible:\n path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'ansible.txt')\n requirements_paths.append((ANSIBLE_TEST_DATA_ROOT, path))\n\n if command:\n path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', f'{command}.txt')\n requirements_paths.append((ANSIBLE_TEST_DATA_ROOT, path))\n\n return collect_install(requirements_paths, constraints_paths)\n\n\ndef collect_package_install(packages, constraints=True): # type: (t.List[str], bool) -> t.List[PipInstall]\n \"\"\"Return the details necessary to install the specified packages.\"\"\"\n return collect_install([], [], packages, constraints=constraints)\n\n\ndef collect_sanity_install(sanity): # type: (str) -> t.List[PipInstall]\n \"\"\"Return the details necessary for the specified sanity pip install(s).\"\"\"\n requirements_paths = [] # type: t.List[t.Tuple[str, str]]\n constraints_paths = [] # type: t.List[t.Tuple[str, str]]\n\n path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', f'sanity.{sanity}.txt')\n requirements_paths.append((ANSIBLE_TEST_DATA_ROOT, path))\n\n if data_context().content.is_ansible:\n path = os.path.join(data_context().content.sanity_path, 'code-smell', f'{sanity}.requirements.txt')\n requirements_paths.append((data_context().content.root, path))\n\n return collect_install(requirements_paths, constraints_paths, constraints=False)\n\n\ndef collect_units_install(): # type: () -> t.List[PipInstall]\n \"\"\"Return details necessary for the specified units pip install(s).\"\"\"\n requirements_paths = [] # type: t.List[t.Tuple[str, str]]\n constraints_paths = [] # type: t.List[t.Tuple[str, str]]\n\n path = os.path.join(data_context().content.unit_path, 'requirements.txt')\n requirements_paths.append((data_context().content.root, path))\n\n path = os.path.join(data_context().content.unit_path, 'constraints.txt')\n constraints_paths.append((data_context().content.root, path))\n\n return collect_install(requirements_paths, constraints_paths)\n\n\ndef collect_integration_install(command, controller): # type: (str, bool) -> t.List[PipInstall]\n \"\"\"Return details necessary for the specified integration pip install(s).\"\"\"\n requirements_paths = [] # type: t.List[t.Tuple[str, str]]\n constraints_paths = [] # type: t.List[t.Tuple[str, str]]\n\n # Support for prefixed files was added to ansible-test in ansible-core 2.12 when split controller/target testing was implemented.\n # Previous versions of ansible-test only recognize non-prefixed files.\n # If a prefixed file exists (even if empty), it takes precedence over the non-prefixed file.\n prefixes = ('controller.' 
if controller else 'target.', '')\n\n for prefix in prefixes:\n path = os.path.join(data_context().content.integration_path, f'{prefix}requirements.txt')\n\n if os.path.exists(path):\n requirements_paths.append((data_context().content.root, path))\n break\n\n for prefix in prefixes:\n path = os.path.join(data_context().content.integration_path, f'{command}.{prefix}requirements.txt')\n\n if os.path.exists(path):\n requirements_paths.append((data_context().content.root, path))\n break\n\n for prefix in prefixes:\n path = os.path.join(data_context().content.integration_path, f'{prefix}constraints.txt')\n\n if os.path.exists(path):\n constraints_paths.append((data_context().content.root, path))\n break\n\n return collect_install(requirements_paths, constraints_paths)\n\n\ndef collect_install(\n requirements_paths, # type: t.List[t.Tuple[str, str]]\n constraints_paths, # type: t.List[t.Tuple[str, str]]\n packages=None, # type: t.Optional[t.List[str]]\n constraints=True, # type: bool\n) -> t.List[PipInstall]:\n \"\"\"Build a pip install list from the given requirements, constraints and packages.\"\"\"\n # listing content constraints first gives them priority over constraints provided by ansible-test\n constraints_paths = list(constraints_paths)\n\n if constraints:\n constraints_paths.append((ANSIBLE_TEST_DATA_ROOT, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'constraints.txt')))\n\n requirements = [(os.path.relpath(path, root), read_text_file(path)) for root, path in requirements_paths if usable_pip_file(path)]\n constraints = [(os.path.relpath(path, root), read_text_file(path)) for root, path in constraints_paths if usable_pip_file(path)]\n packages = packages or []\n\n if requirements or packages:\n installs = [PipInstall(\n requirements=requirements,\n constraints=constraints,\n packages=packages,\n )]\n else:\n installs = []\n\n return installs\n\n\ndef collect_uninstall(packages, ignore_errors=False): # type: (t.List[str], bool) -> t.List[PipUninstall]\n \"\"\"Return the details necessary for the specified pip uninstall.\"\"\"\n uninstall = PipUninstall(\n packages=packages,\n ignore_errors=ignore_errors,\n )\n\n return [uninstall]\n\n\n# Support\n\n\ndef get_venv_packages(python): # type: (PythonConfig) -> t.Dict[str, str]\n \"\"\"Return a dictionary of Python packages needed for a consistent virtual environment specific to the given Python version.\"\"\"\n\n # NOTE: This same information is needed for building the base-test-container image.\n # See: https://github.com/ansible/base-test-container/blob/main/files/installer.py\n\n default_packages = dict(\n pip='21.3.1',\n setuptools='60.8.2',\n wheel='0.37.1',\n )\n\n override_packages = {\n '2.7': dict(\n pip='20.3.4', # 21.0 requires Python 3.6+\n setuptools='44.1.1', # 45.0.0 requires Python 3.5+\n wheel=None,\n ),\n '3.5': dict(\n pip='20.3.4', # 21.0 requires Python 3.6+\n setuptools='50.3.2', # 51.0.0 requires Python 3.6+\n wheel=None,\n ),\n '3.6': dict(\n pip='21.3.1', # 22.0 requires Python 3.7+\n setuptools='59.6.0', # 59.7.0 requires Python 3.7+\n wheel=None,\n ),\n }\n\n packages = {name: version or default_packages[name] for name, version in override_packages.get(python.version, default_packages).items()}\n\n return packages\n\n\ndef requirements_allowed(args, controller): # type: (EnvironmentConfig, bool) -> bool\n \"\"\"\n Return True if requirements can be installed, otherwise return False.\n\n Requirements are only allowed if one of the following conditions is met:\n\n The user specified --requirements manually.\n 
The install will occur on the controller and the controller or controller Python is managed by ansible-test.\n The install will occur on the target and the target or target Python is managed by ansible-test.\n \"\"\"\n if args.requirements:\n return True\n\n if controller:\n return args.controller.is_managed or args.controller.python.is_managed\n\n target = args.only_targets(PosixConfig)[0]\n\n return target.is_managed or target.python.is_managed\n\n\ndef prepare_pip_script(commands): # type: (t.List[PipCommand]) -> str\n \"\"\"Generate a Python script to perform the requested pip commands.\"\"\"\n data = [command.serialize() for command in commands]\n\n display.info(f'>>> Requirements Commands\\n{json.dumps(data, indent=4)}', verbosity=3)\n\n args = dict(\n script=read_text_file(QUIET_PIP_SCRIPT_PATH),\n verbosity=display.verbosity,\n commands=data,\n )\n\n payload = to_text(base64.b64encode(to_bytes(json.dumps(args))))\n path = REQUIREMENTS_SCRIPT_PATH\n template = read_text_file(path)\n script = template.format(payload=payload)\n\n display.info(f'>>> Python Script from Template ({path})\\n{script.strip()}', verbosity=4)\n\n return script\n\n\ndef usable_pip_file(path): # type: (t.Optional[str]) -> bool\n \"\"\"Return True if the specified pip file is usable, otherwise False.\"\"\"\n return bool(path) and os.path.exists(path) and bool(os.path.getsize(path))\n\n\n# Cryptography\n\n\ndef is_cryptography_available(python): # type: (str) -> bool\n \"\"\"Return True if cryptography is available for the given python.\"\"\"\n try:\n raw_command([python, '-c', 'import cryptography'], capture=True)\n except SubprocessError:\n return False\n\n return True\n\n\ndef get_cryptography_requirements(python): # type: (PythonConfig) -> t.List[str]\n \"\"\"\n Return the correct cryptography and pyopenssl requirements for the given python version.\n The version of cryptography installed depends on the python version and openssl version.\n \"\"\"\n openssl_version = get_openssl_version(python)\n\n if openssl_version and openssl_version < (1, 1, 0):\n # cryptography 3.2 requires openssl 1.1.x or later\n # see https://cryptography.io/en/latest/changelog.html#v3-2\n cryptography = 'cryptography < 3.2'\n # pyopenssl 20.0.0 requires cryptography 3.2 or later\n pyopenssl = 'pyopenssl < 20.0.0'\n else:\n # cryptography 3.4+ builds require a working rust toolchain\n # systems bootstrapped using ansible-core-ci can access additional wheels through the spare-tire package index\n cryptography = 'cryptography'\n # any future installation of pyopenssl is free to use any compatible version of cryptography\n pyopenssl = ''\n\n requirements = [\n cryptography,\n pyopenssl,\n ]\n\n requirements = [requirement for requirement in requirements if requirement]\n\n return requirements\n\n\ndef get_openssl_version(python): # type: (PythonConfig) -> t.Optional[t.Tuple[int, ...]]\n \"\"\"Return the openssl version.\"\"\"\n if not python.version.startswith('2.'):\n # OpenSSL version checking only works on Python 3.x.\n # This should be the most accurate, since it is the Python we will be using.\n version = json.loads(raw_command([python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'sslcheck.py')], capture=True)[0])['version']\n\n if version:\n display.info(f'Detected OpenSSL version {version_to_str(version)} under Python {python.version}.', verbosity=1)\n\n return tuple(version)\n\n # Fall back to detecting the OpenSSL version from the CLI.\n # This should provide an adequate solution on Python 2.x.\n openssl_path = 
find_executable('openssl', required=False)\n\n if openssl_path:\n try:\n result = raw_command([openssl_path, 'version'], capture=True)[0]\n except SubprocessError:\n result = ''\n\n match = re.search(r'^OpenSSL (?P[0-9]+\\.[0-9]+\\.[0-9]+)', result)\n\n if match:\n version = str_to_version(match.group('version'))\n\n display.info(f'Detected OpenSSL version {version_to_str(version)} using the openssl CLI.', verbosity=1)\n\n return version\n\n display.info('Unable to detect OpenSSL version.', verbosity=1)\n\n return None\n", "sub_path": "lib/python3.8/site-packages/ansible_test/_internal/python_requirements.py", "file_name": "python_requirements.py", "file_ext": "py", "file_size_in_byte": 19934, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "util.ANSIBLE_TEST_TARGET_ROOT", "line_number": 62, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "util.ANSIBLE_TEST_TARGET_ROOT", "line_number": 63, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "util.ApplicationError", "line_number": 69, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 75, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 88, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 88, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 89, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 89, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 90, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 85, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 103, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 100, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 107, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 116, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 112, "usage_type": "call"}, {"api_name": "util_common.create_result_directories", "line_number": 133, "usage_type": "call"}, {"api_name": "config.UnitsConfig", "line_number": 138, "usage_type": "name"}, {"api_name": "config.IntegrationConfig", "line_number": 138, "usage_type": "name"}, {"api_name": "util_common.check_pyyaml", "line_number": 180, "usage_type": "call"}, {"api_name": "constants.COVERAGE_REQUIRED_VERSION", "line_number": 217, "usage_type": "name"}, {"api_name": "connections.LocalConnection", "line_number": 259, "usage_type": "call"}, {"api_name": "util.SubprocessError", "line_number": 265, "usage_type": "name"}, {"api_name": "util.SubprocessError", "line_number": 270, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 289, "usage_type": "call"}, {"api_name": "util.ANSIBLE_TEST_DATA_ROOT", "line_number": 289, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 289, "usage_type": "attribute"}, {"api_name": "util.ANSIBLE_TEST_DATA_ROOT", "line_number": 290, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 293, "usage_type": "call"}, {"api_name": "util.ANSIBLE_TEST_DATA_ROOT", "line_number": 293, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 293, "usage_type": "attribute"}, {"api_name": 
"util.ANSIBLE_TEST_DATA_ROOT", "line_number": 294, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 309, "usage_type": "call"}, {"api_name": "util.ANSIBLE_TEST_DATA_ROOT", "line_number": 309, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 309, "usage_type": "attribute"}, {"api_name": "util.ANSIBLE_TEST_DATA_ROOT", "line_number": 310, "usage_type": "name"}, {"api_name": "data.data_context", "line_number": 312, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 313, "usage_type": "call"}, {"api_name": "os.path", "line_number": 313, "usage_type": "attribute"}, {"api_name": "data.data_context", "line_number": 313, "usage_type": "call"}, {"api_name": "data.data_context", "line_number": 314, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 324, "usage_type": "call"}, {"api_name": "os.path", "line_number": 324, "usage_type": "attribute"}, {"api_name": "data.data_context", "line_number": 324, "usage_type": "call"}, {"api_name": "data.data_context", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 327, "usage_type": "call"}, {"api_name": "os.path", "line_number": 327, "usage_type": "attribute"}, {"api_name": "data.data_context", "line_number": 327, "usage_type": "call"}, {"api_name": "data.data_context", "line_number": 328, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 344, "usage_type": "call"}, {"api_name": "os.path", "line_number": 344, "usage_type": "attribute"}, {"api_name": "data.data_context", "line_number": 344, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 346, "usage_type": "call"}, {"api_name": "os.path", "line_number": 346, "usage_type": "attribute"}, {"api_name": "data.data_context", "line_number": 347, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path", "line_number": 351, "usage_type": "attribute"}, {"api_name": "data.data_context", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 353, "usage_type": "call"}, {"api_name": "os.path", "line_number": 353, "usage_type": "attribute"}, {"api_name": "data.data_context", "line_number": 354, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 358, "usage_type": "call"}, {"api_name": "os.path", "line_number": 358, "usage_type": "attribute"}, {"api_name": "data.data_context", "line_number": 358, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 360, "usage_type": "call"}, {"api_name": "os.path", "line_number": 360, "usage_type": "attribute"}, {"api_name": "data.data_context", "line_number": 361, "usage_type": "call"}, {"api_name": "util.ANSIBLE_TEST_DATA_ROOT", "line_number": 378, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 378, "usage_type": "call"}, {"api_name": "os.path", "line_number": 378, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 380, "usage_type": "call"}, {"api_name": "os.path", "line_number": 380, "usage_type": "attribute"}, {"api_name": "io.read_text_file", "line_number": 380, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 381, "usage_type": "call"}, {"api_name": "os.path", "line_number": 381, "usage_type": "attribute"}, {"api_name": "io.read_text_file", "line_number": 381, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 372, "usage_type": "attribute"}, {"api_name": "host_configs.PosixConfig", "line_number": 460, "usage_type": "argument"}, {"api_name": 
"util.display.info", "line_number": 469, "usage_type": "call"}, {"api_name": "util.display", "line_number": 469, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 469, "usage_type": "call"}, {"api_name": "io.read_text_file", "line_number": 472, "usage_type": "call"}, {"api_name": "util.display.verbosity", "line_number": 473, "usage_type": "attribute"}, {"api_name": "util.display", "line_number": 473, "usage_type": "name"}, {"api_name": "encoding.to_text", "line_number": 477, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 477, "usage_type": "call"}, {"api_name": "encoding.to_bytes", "line_number": 477, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 477, "usage_type": "call"}, {"api_name": "io.read_text_file", "line_number": 479, "usage_type": "call"}, {"api_name": "util.display.info", "line_number": 482, "usage_type": "call"}, {"api_name": "util.display", "line_number": 482, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 489, "usage_type": "call"}, {"api_name": "os.path", "line_number": 489, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 489, "usage_type": "call"}, {"api_name": "util.raw_command", "line_number": 498, "usage_type": "call"}, {"api_name": "util.SubprocessError", "line_number": 499, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 540, "usage_type": "call"}, {"api_name": "util.raw_command", "line_number": 540, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 540, "usage_type": "call"}, {"api_name": "util.ANSIBLE_TEST_TOOLS_ROOT", "line_number": 540, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 540, "usage_type": "attribute"}, {"api_name": "util.display.info", "line_number": 543, "usage_type": "call"}, {"api_name": "util.display", "line_number": 543, "usage_type": "name"}, {"api_name": "util.version_to_str", "line_number": 543, "usage_type": "call"}, {"api_name": "util.find_executable", "line_number": 549, "usage_type": "call"}, {"api_name": "util.raw_command", "line_number": 553, "usage_type": "call"}, {"api_name": "util.SubprocessError", "line_number": 554, "usage_type": "name"}, {"api_name": "re.search", "line_number": 557, "usage_type": "call"}, {"api_name": "util.str_to_version", "line_number": 560, "usage_type": "call"}, {"api_name": "util.display.info", "line_number": 562, "usage_type": "call"}, {"api_name": "util.display", "line_number": 562, "usage_type": "name"}, {"api_name": "util.version_to_str", "line_number": 562, "usage_type": "call"}, {"api_name": "util.display.info", "line_number": 566, "usage_type": "call"}, {"api_name": "util.display", "line_number": 566, "usage_type": "name"}]} +{"seq_id": "584980561", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 20 11:04:14 2019\n\n@author: menghan\n\"\"\"\n\nimport pandas as pd\nfrom gensim.models import KeyedVectors\nimport gensim.downloader as api\ndim = 25\nmodel_type = 'twitter'\nprint('Using '+ model_type + ' wv model')\ndef getVector(tweet, dim):\n cnt = []\n c=0\n vectors = np.zeros((len(tweet), dim))\n for i in range(len(tweet)):\n try:\n vectors[i,:] = model[tweet[i]]\n except:\n c+=1\n# print(tweet[i] + ' not in vocabulary')\n cnt.append(c)\n return vectors\n# If you see `SSL: CERTIFICATE_VERIFY_FAILED` error, use this:\n#import ssl\n#import urllib.request\n#ssl._create_default_https_context = ssl._create_unverified_context\n#\n#model = api.load(\"glove-twitter-100\")\nif model_type == 'twitter':\n model = 
KeyedVectors.load_word2vec_format('D:/DMLAB/glove-twitter-'+str(dim)+'.gz')\n print('load ok')\nelif model_type == 'google':\n model = KeyedVectors.load_word2vec_format('D:/DMLAB/GoogleNews-vectors-negative300.bin.gz', binary=True)\n print('load ok')\n\n#%%\nfrom sklearn.model_selection import train_test_split\n## load a pickle file\nfrac_df = pd.read_pickle(\"balance_train_64000.pkl\")\ntrain_df, test_df, _, _ = train_test_split(frac_df, frac_df['emotion'], test_size=0.2, random_state=0)\n#%%\nimport nltk\nimport numpy as np\ntokenizer = nltk.word_tokenize\ntraining_tokens = list(train_df['text'].apply(lambda x: nltk.word_tokenize(x)))\ntesting_tokens = list(test_df['text'].apply(lambda x: nltk.word_tokenize(x)))\n\ntraining_vectors = [getVector(x, dim) for x in training_tokens]\ntesting_vectors = [getVector(x, dim) for x in testing_tokens]\n\n#%%\nimport pickle\nfile = open('vector/balance_train_64000_' + model_type + '_' + str(dim) + '.pkl', 'wb')\npickle.dump(training_vectors, file)\nfile.close() \n\nfile = open('vector/balance_test_64000_' + model_type + '_' + str(dim) + '.pkl', 'wb')\npickle.dump(testing_vectors, file)\nfile.close() \n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "wordvector.py", "file_name": "wordvector.py", "file_ext": "py", "file_size_in_byte": 1947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 33, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 33, "usage_type": "name"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 36, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 36, "usage_type": "name"}, {"api_name": "pandas.read_pickle", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 43, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 47, "usage_type": "attribute"}, {"api_name": "nltk.word_tokenize", "line_number": 48, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 49, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 57, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "271785759", "text": "import json\nclass Account():\n def __init__(self):\n self.cache = dict()\n with open('config/setting.json') as config_file:\n setting = json.load(config_file)\n self.offset = setting[\"config_cache\"][\"offset\"]\n self.size = setting[\"config_cache\"][\"size\"]\n for i in range(self.offset):\n self.cache[i] = []\n\n def find_in_cache(self,ac_no):\n #print(self.cache)\n ac_no = ac_no.strip(\"'\")\n print(\"find ac_no = \",ac_no)\n index = int(ac_no)%self.offset\n if(index in self.cache):\n for c in self.cache[index]:\n if(c[\"ac_no\"]==ac_no):\n tmp = c\n self.cache[index].remove(c)\n self.cache[index].insert(0,tmp)\n #print(self.cache)\n return tmp\n return False\n\n def store_in_cache(self,ac_no,host):\n #print(self.cache)\n ac_no = ac_no.strip(\"'\")\n print(\"store ac_no = \",ac_no,\" host = \",host)\n index = int(ac_no)%self.offset\n if(not self.find_in_cache(ac_no)):\n if(len(self.cache[index]) < self.size):\n self.cache[index].insert(0,{\"ac_no\":ac_no,\"host\":host})\n else:\n self.handle_memory(index)\n self.cache[index].insert(0,{\"ac_no\":ac_no,\"host\":host})\n #print(self.cache)\n return True\n return False\n \n def handle_memory(self,index):\n 
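In the wordvector.py record above, getVector zero-fills rows for out-of-vocabulary tokens and counts the misses; that pattern is worth isolating. A minimal sketch with a plain dict standing in for the gensim model (the helper name is mine; KeyedVectors also supports `in` and `[]`):

import numpy as np

def embed_tokens(tokens, lookup, dim):
    """OOV-safe lookup in the spirit of getVector: rows for
    out-of-vocabulary tokens stay zero, and misses are counted."""
    vectors = np.zeros((len(tokens), dim))
    misses = 0
    for i, tok in enumerate(tokens):
        if tok in lookup:
            vectors[i, :] = lookup[tok]
        else:
            misses += 1          # leave the zero row in place
    return vectors, misses

# Toy vocabulary standing in for the GloVe/word2vec model:
vocab = {'happy': np.ones(3), 'sad': -np.ones(3)}
emb, oov = embed_tokens(['happy', 'unknown', 'sad'], vocab, 3)
assert oov == 1 and not emb[1].any()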
self.cache[index].pop(-1)", "sub_path": "Cache-service/Model/Account.py", "file_name": "Account.py", "file_ext": "py", "file_size_in_byte": 1505, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "json.load", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "21389849", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Copyright 2013 Nextdoor.com, Inc.\n\n\"\"\"\n:mod:`tornado_rest_client.utils`\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nCommon package for utility functions.\n\"\"\"\n\nimport logging\nimport re\n\nfrom tornado import gen\n\n__author__ = 'Matt Wise (matt@nextdoor.com)'\n\nlog = logging.getLogger(__name__)\n\n\n@gen.coroutine\ndef tornado_sleep(seconds=1.0):\n \"\"\"Async method equivalent to sleeping.\n\n Args:\n seconds: Float seconds. Default 1.0\n \"\"\"\n yield gen.sleep(seconds)\n\n\ndef populate_with_tokens(string, tokens, left_wrapper='%', right_wrapper='%',\n strict=True):\n \"\"\"Insert token variables into the string.\n\n Will match any token wrapped in '%'s and replace it with the value of that\n token.\n\n Args:\n string: string to modify.\n tokens: dictionary of key:value pairs to inject into the string.\n left_wrapper: the character to use as the START of a token\n right_wrapper: the character to use as the END of a token\n strict: (bool) whether or not to make sure all tokens were replaced\n\n Example:\n export ME=biz\n\n string='foo %ME% %bar%'\n populate_with_tokens(string, os.environ) # 'foo biz %bar%'\n \"\"\"\n\n # First things first, swap out all instances of %% with any matching\n # token variables found. If no items are in the hash (none, empty hash,\n # etc), then skip this.\n allowed_types = (str, bool, int, float)\n if tokens:\n for k, v in tokens.items():\n\n if type(v) not in allowed_types:\n log.warning('Token %s=%s is not in allowed types: %s' % (\n k, v, allowed_types))\n continue\n\n string = string.replace(\n ('%s%s%s' % (left_wrapper, k, right_wrapper)), str(v))\n\n # If we aren't strict, we return...\n if not strict:\n return string\n\n # If we are strict, we check if we missed anything. 
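The Account class above implements a bucketed move-to-front cache: find_in_cache promotes hits to the front of their bucket, and store_in_cache evicts via handle_memory's pop(-1) once a bucket holds `size` entries. The same policy in a compact OrderedDict form (a sketch, not the service's code):

from collections import OrderedDict

class LRUBucket:
    """Per-bucket LRU equivalent of the list-based cache above:
    hits move to the front, inserts beyond `size` evict the oldest."""
    def __init__(self, size):
        self.size = size
        self.entries = OrderedDict()          # ac_no -> host

    def find(self, ac_no):
        if ac_no not in self.entries:
            return False
        self.entries.move_to_end(ac_no, last=False)   # move-to-front on hit
        return {'ac_no': ac_no, 'host': self.entries[ac_no]}

    def store(self, ac_no, host):
        if ac_no in self.entries:
            return False
        if len(self.entries) >= self.size:
            self.entries.popitem(last=True)   # drop the least-recently-used tail
        self.entries[ac_no] = host
        self.entries.move_to_end(ac_no, last=False)
        return True

b = LRUBucket(size=2)
b.store('101', 'host-a'); b.store('205', 'host-b'); b.store('309', 'host-c')
assert b.find('101') is False                 # evicted as the oldest entry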
If we did, raise an\n # exception.\n missed_tokens = list(set(re.findall(r'%s[\\w]+%s' %\n (left_wrapper, right_wrapper), string)))\n if missed_tokens:\n raise LookupError(\n 'Found un-matched tokens in JSON string: %s' % missed_tokens)\n\n return string\n", "sub_path": "tornado_rest_client/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2755, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "tornado.gen.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 39, "usage_type": "name"}, {"api_name": "tornado.gen.coroutine", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 32, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "307733870", "text": "from argparse import ArgumentParser\nimport tensorflow as tf\nfrom archs import P_Net, losses\nimport time\n# from math import pow\nimport os\nfrom random import shuffle\nimport numpy as np\nfrom PIL import Image\nimport math\nimport matplotlib.pyplot as plt\nfrom generator import generator_img_region\n\n\ndef initialize_uninitialized(sess):\n global_vars = tf.global_variables()\n is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])\n not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]\n if len(not_initialized_vars):\n sess.run(tf.variables_initializer(not_initialized_vars))\n\n\nbatch = 32\nepoch = 1000000\nlear = 1e-3\nlss = []\nge = generator_img_region.generator(json_path='/data/train_label.json',\n image_path='/data/img_pyramids/',\n image_size=12,\n batch=int(batch/4),\n image_from_each_face=4)\n\nparser = ArgumentParser()\nparser.add_argument(\"-s\", \"--save-log\", help=\"save to train_log\", dest=\"save_log\", default=\"0\")\nparser.add_argument(\"-G\", \"--gpu-memory\", help=\"gpu memary used\", type=float, dest=\"gpu_memory\", default=\"0.4\")\nargs = parser.parse_args()\n\nsave_log = os.path.join('train_log',\n args.save_log)\n\nif not os.path.isdir(save_log):\n os.mkdir(save_log)\n\n# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory)\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\noutput_num = 10\n\nwith tf.Session(config=config) as sess:\n net = P_Net.P_Net()\n\n img = tf.placeholder(tf.float32,\n [batch, None, None, 3])\n lab = tf.placeholder(tf.float32,\n [batch, None, None, output_num])\n com_lab = tf.placeholder(tf.float32,\n [batch, None, None, output_num])\n lr = tf.placeholder(tf.float32,\n [None])\n\n output = net.P_pr(img)\n cls = output[:, :, :, :2]\n bbox = output[:, :, :, 2:6]\n eye_reg = output[:, :, :, 6:]\n\n lab_cls = lab[:, :, :, :2]\n lab_bbox = lab[:, :, :, 2:6]\n lab_eye_reg = lab[:, :, :, 6:]\n\n com_lab_cls = com_lab[:, :, :, :2]\n com_lab_bbox = com_lab[:, :, :, 2:6]\n com_lab_eye_reg = com_lab[:, :, :, 6:]\n\n cls_loss = losses.cross_entropy_loss(cls, lab_cls)\n bbox_loss = losses.Euclidean_loss(bbox, lab_bbox)*com_lab_bbox\n bbox_loss = tf.squeeze(bbox_loss)\n eye_reg_loss = losses.Euclidean_loss(eye_reg, lab_eye_reg)*com_lab_eye_reg\n eye_reg_loss = tf.squeeze(eye_reg_loss)\n\n loss = 1 * tf.reduce_sum(cls_loss) + 0.5 * tf.reduce_sum(bbox_loss) + 0.5 * tf.reduce_sum(eye_reg_loss)\n # loss = 0.5 * tf.reduce_sum(bbox_loss, 3) + 0.5 * tf.reduce_sum(eye_reg_loss, 3)\n\n train_step = 
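populate_with_tokens above is clearest from its strict failure mode: substitute every wrapped token, then raise if any %token% survives. A condensed re-implementation under the same contract (default '%' wrappers; the helper name is mine):

import re

def fill_tokens(template, tokens, strict=True):
    for key, value in tokens.items():
        if isinstance(value, (str, bool, int, float)):
            template = template.replace('%%%s%%' % key, str(value))
    if strict:
        leftover = sorted(set(re.findall(r'%\w+%', template)))
        if leftover:
            raise LookupError('un-matched tokens: %s' % leftover)
    return template

assert fill_tokens('foo %ME%', {'ME': 'biz'}) == 'foo biz'
try:
    fill_tokens('foo %ME% %bar%', {'ME': 'biz'})
except LookupError as err:
    print(err)   # un-matched tokens: ['%bar%']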
tf.train.MomentumOptimizer(lr[0],\n 0.9). \\\n minimize(loss)\n\n model_af = tf.train.Saver()\n initialize_uninitialized(sess)\n\n print('begin', end=' :')\n for seq in range(epoch):\n\n if (seq + 1) % 20000 == 0:\n lear *= 0.5\n print(lear)\n\n begin_time = time.time()\n\n imgs, \\\n labs, com_labs\\\n = ge.__next__(12)\n labs = np.reshape(np.array(labs), [batch, 1, 1, 10])\n com_labs = np.reshape(np.array(com_labs), [batch, 1, 1, 10])\n\n sess.run(train_step,\n feed_dict={img: imgs,\n lab: labs,\n com_lab: com_labs,\n lr: [lear]})\n\n if seq % 5 == 0:\n\n print('\\nSequence:', str(seq))\n\n [ls_t,\n out] = sess.run([loss,\n output],\n feed_dict={img: imgs,\n lab: labs,\n com_lab: com_labs,\n lr: [lear]})\n out = np.squeeze(out)\n print(np.amax(labs))\n lss.append(ls_t)\n if len(lss) > 1e4:\n lss.remove(lss[0])\n\n plt.plot(range(len(lss)), lss)\n feed_back_folder = os.path.join(save_log, 'feed_back')\n\n if not os.path.isdir(feed_back_folder):\n os.mkdir(feed_back_folder)\n plt.savefig(os.path.join(feed_back_folder,\n 'l' + str(int(seq / 5e4)) + '.png'))\n #plt.show()\n plt.clf()\n\n if np.isnan(ls_t):\n input('isnan')\n\n avg_loss = sum([0.9 * \\\n math.pow(0.1,\n len(lss) - 1 - lsins) \\\n * ls \\\n for lsins, ls in enumerate(lss)])\n\n print('spand time: {0:.3f}, loss: {1:.3f}, max value: {2:.3f}'. \\\n format(time.time() - begin_time\n , avg_loss,\n np.amax(out[:, 2:])\n ))\n\n if (seq + 1) % 1000 == 0:\n save_folder = os.path.join(save_log, 'models')\n\n if not os.path.isdir(save_folder):\n os.mkdir(save_folder)\n\n model_af.save(sess,\n os.path.join(save_folder,\n str(seq + 1) + 'save_net.ckpt'))\n", "sub_path": "train_P.py", "file_name": "train_P.py", "file_ext": "py", "file_size_in_byte": 5381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "tensorflow.global_variables", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.is_variable_initialized", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.variables_initializer", "line_number": 20, "usage_type": "call"}, {"api_name": "generator.generator_img_region.generator", "line_number": 27, "usage_type": "call"}, {"api_name": "generator.generator_img_region", "line_number": 27, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 50, "usage_type": "call"}, {"api_name": "archs.P_Net.P_Net", "line_number": 51, "usage_type": "call"}, {"api_name": "archs.P_Net", "line_number": 51, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 57, "usage_type": "attribute"}, {"api_name": 
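The training loop in the train_P.py record halves `lear` every 20000 steps before feeding it through the lr placeholder; that schedule has a closed form, which makes it easy to sanity-check. A pure-Python sketch (the function name is mine):

def stepwise_lr(step, base_lr=1e-3, drop=0.5, every=20000):
    """Closed form of the loop's `if (seq + 1) % 20000 == 0: lear *= 0.5`."""
    return base_lr * drop ** ((step + 1) // every)

assert stepwise_lr(0) == 1e-3
assert stepwise_lr(19999) == 5e-4   # first halving fires when seq+1 == 20000
assert stepwise_lr(39999) == 2.5e-4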
"tensorflow.placeholder", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "archs.losses.cross_entropy_loss", "line_number": 75, "usage_type": "call"}, {"api_name": "archs.losses", "line_number": 75, "usage_type": "name"}, {"api_name": "archs.losses.Euclidean_loss", "line_number": 76, "usage_type": "call"}, {"api_name": "archs.losses", "line_number": 76, "usage_type": "name"}, {"api_name": "tensorflow.squeeze", "line_number": 77, "usage_type": "call"}, {"api_name": "archs.losses.Euclidean_loss", "line_number": 78, "usage_type": "call"}, {"api_name": "archs.losses", "line_number": 78, "usage_type": "name"}, {"api_name": "tensorflow.squeeze", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.train.MomentumOptimizer", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 88, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 139, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 143, "usage_type": "call"}, {"api_name": "time.time", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}]} +{"seq_id": "497316694", "text": "import multiprocessing\nfrom typing import Tuple, Union, 
Dict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom diffpy.srfit.fitbase import Profile, FitContribution\nfrom diffpy.srfit.fitbase.parameter import ParameterProxy\nfrom diffpy.srfit.pdf import PDFGenerator, DebyePDFGenerator\nfrom diffpy.srfit.structure.diffpyparset import DiffpyStructureParSet\nfrom diffpy.srfit.structure.objcrystparset import ObjCrystCrystalParSet\nfrom diffpy.srfit.structure.sgconstraints import constrainAsSpaceGroup\nfrom matplotlib.axes import Axes\nfrom pdfstream.visualization.main import visualize\nfrom scipy.optimize import least_squares\n\nfrom pdffitx.modeling.core import MyRecipe, MyContribution\nfrom .fitobjs import MyParser, ConConfig, GenConfig\n\n__all__ = [\n 'make_profile',\n 'make_generator',\n 'make_contribution',\n 'make_recipe',\n 'fit',\n 'plot',\n 'constrainAsSpaceGroup',\n 'sgconstrain',\n 'cfconstrain',\n 'sgconstrain_all',\n 'cfconstrain_all',\n 'get_sgpars'\n]\n\n\n# functions used in fitting\ndef make_profile(parser: MyParser, fit_range: Tuple[float, float, float]) -> Profile:\n \"\"\"\n Make a Profile, parse data file to it and set its calculation range.\n\n Parameters\n ----------\n parser\n The parser with parsed data from the data source.\n\n fit_range\n The tuple of (rmax, rmin, dr) in Angstrom.\n\n Returns\n -------\n profile\n The Profile with the parsed data and the calculation range.\n \"\"\"\n profile = Profile()\n profile.loadParsedData(parser)\n rmin, rmax, rstep = fit_range\n profile.setCalculationRange(rmin, rmax, rstep)\n return profile\n\n\ndef make_generator(genconfig: GenConfig) -> Union[PDFGenerator, DebyePDFGenerator]:\n \"\"\"\n Build a generator according to the information in the GenConfig.\n\n Parameters\n ----------\n genconfig : GenConfig\n A configuration instance for generator building.\n\n Returns\n -------\n generator: PDFGenerator or DebyePDFGenerator\n A generator built from GenConfig.\n \"\"\"\n generator = DebyePDFGenerator(genconfig.name) if genconfig.debye else PDFGenerator(genconfig.name)\n generator.setStructure(genconfig.structure, periodic=genconfig.structure)\n ncpu = genconfig.ncpu\n if ncpu:\n pool = multiprocessing.Pool(ncpu)\n generator.parallel(ncpu, mapfunc=pool.imap_unordered)\n return generator\n\n\ndef make_contribution(conconfig: ConConfig, xname: str = \"r\") -> MyContribution:\n \"\"\"\n Make a FitContribution according to the ConConfig.\n\n Parameters\n ----------\n conconfig : ConConfig\n The configuration instance for the FitContribution.\n\n xname : str\n The name of the independent variable. 
Default 'r'.\n\n Returns\n -------\n contribution : MyContribution\n The FitContribution built from ConConfig.\n \"\"\"\n contribution = MyContribution(conconfig.name)\n\n fit_range = conconfig.fit_range\n if contribution.profile is not None:\n profile = make_profile(conconfig.parser, fit_range)\n contribution.setProfile(profile, xname=xname)\n\n for genconfig in conconfig.genconfigs:\n generator = make_generator(genconfig)\n contribution.addProfileGenerator(generator)\n\n for base_line in conconfig.baselines:\n contribution.addProfileGenerator(base_line)\n\n for function in conconfig.funconfigs:\n name = function.name\n func_type = function.func\n argnames = function.argnames\n contribution.registerFunction(func_type, name, argnames)\n\n contribution.setEquation(conconfig.eq)\n contribution.setResidualEquation(conconfig.res_eq)\n\n return contribution\n\n\ndef make_recipe(*conconfigs: ConConfig) -> MyRecipe:\n \"\"\"\n Make a FitRecipe based on single or multiple ConConfig.\n\n Parameters\n ----------\n conconfigs\n The configurations of single or multiple FitContribution.\n\n Returns\n -------\n recipe\n MyRecipe built from ConConfigs.\n \"\"\"\n recipe = MyRecipe()\n\n for conconfig in conconfigs:\n contribution = make_contribution(conconfig)\n recipe.addContribution(contribution, conconfig.weight)\n\n recipe.clearFitHooks()\n\n return recipe\n\n\ndef fit(recipe: MyRecipe, **kwargs) -> None:\n \"\"\"\n Fit the data according to recipe. parameters associated with fitting can be set in kwargs.\n\n Parameters\n ----------\n recipe\n MyRecipe to fit.\n\n kwargs\n Parameters in fitting. They are\n verbose: how much information to print. Default 1.\n values: initial value for fitting. Default get from recipe.\n bounds: two list of lower and upper bounds. Default get from recipe.\n xtol, gtol, ftol: tolerance in least squares. Default 1.E-5, 1.E-5, 1.E-5.\n max_nfev: maximum number of evaluation of residual function. 
Default None.\n \"\"\"\n values = kwargs.get(\"values\", recipe.values)\n bounds = kwargs.get(\"bounds\", recipe.getBounds2())\n verbose = kwargs.get(\"verbose\", 1)\n xtol = kwargs.get(\"xtol\", 1.E-5)\n gtol = kwargs.get(\"gtol\", 1.E-5)\n ftol = kwargs.get(\"ftol\", 1.E-5)\n max_nfev = kwargs.get(\"max_fev\", None)\n least_squares(recipe.residual, values, bounds=bounds, verbose=verbose, xtol=xtol, gtol=gtol, ftol=ftol,\n max_nfev=max_nfev)\n return\n\n\ndef plot(contribution: FitContribution) -> Axes:\n \"\"\"\n Plot the fits for all FitContributions in the recipe.\n\n Parameters\n ----------\n contribution : FitContribution\n The FitRecipe.\n\n Returns\n -------\n ax : Axes\n The axes that has the plot.\n \"\"\"\n r = contribution.profile.x\n g = contribution.profile.y\n gcalc = contribution.profile.ycalc\n gdiff = g - gcalc\n data = np.stack([r, g, gcalc, gdiff])\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax = visualize(\n data,\n ax=ax,\n mode='fit',\n legends=[\"observed\", \"calculated\", \"zero\", \"residual\"],\n label=\"gr\"\n )\n ax.set_title(contribution.name)\n plt.show(block=False)\n return ax\n\n\ndef cfconstrain(recipe: MyRecipe, con_name: str, param_names: list = None, dv: Dict[str, float] = None,\n bounds: Dict[str, tuple] = None) -> Dict[str, ParameterProxy]:\n \"\"\"Add parameters in the contribution.\n\n Add parameters in the Characteristic functions in the FitContribution into the MyRecipe.\n Return the added variables in a dictionary.\n\n Parameters\n ----------\n recipe : MyRecipe\n The recipe to add the parameters\n\n con_name : str\n The name of the FitContribution where the parameters are.\n\n param_names : list\n The name of the parameter to be added in the recipe. If None, add all parameters.\n\n dv : dict\n The path to the .csv file contains the fitting results or the dictionary of values.\n If None, use par.value for any parameters as default value.\n\n bounds : dict\n The mapping from the name of the variable to the tuple of bounds (min, max). Defulat (0, +inf).\n\n Returns\n -------\n variables : dict\n The dictionary mapping from the name of the variable to the variable itself.\n \"\"\"\n if dv is None:\n dv = dict()\n if bounds is None:\n bounds = dict()\n variables = dict()\n con: MyContribution = getattr(recipe, con_name)\n if param_names is None:\n # get all the parameter in the contribution except the independent variable\n pars = {\n arg\n for eq in con.eqfactory.equations\n if eq.name == \"eq\"\n for arg in eq.args\n if arg.name != con.xname\n }\n else:\n pars = {getattr(con, param_name) for param_name in param_names}\n for par in pars:\n variables[par.name] = recipe.addVar(par, value=dv.get(par.name, par.value), tag=\"cf\").boundRange(\n *bounds.get(par.name, (-np.inf, np.inf)))\n return variables\n\n\ndef cfconstrain_all(recipe: MyRecipe, dv: dict = None, bounds: dict = None):\n \"\"\"Constrain all the parameters in registered functions and string equations.\"\"\"\n variables = dict()\n for con_name in recipe.contributions:\n variables.update(\n cfconstrain(recipe, con_name, dv=dv, bounds=bounds)\n )\n return variables\n\n\ndef sgconstrain(recipe: MyRecipe, con_name: str, gen_name: str, sg: Union[int, str] = None,\n dv: Union[str, Dict[str, float]] = None, bounds: Dict[str, tuple] = None,\n add_xyz: bool = True) -> Dict[str, ParameterProxy]:\n \"\"\"Constrain the generator by space group.\n\n The constrained parameters are scale, delta2, lattice parameters, ADPs and xyz coordinates. 
The lattice\n constants and xyz coordinates are constrained by space group while the ADPs are constrained by elements.\n All paramters will be added as '{par.name}_{gen.name}'. The parameters tags are scale_{gen.name},\n delta2_{gen.name}, lat_{gen.name}, adp_{gen.name}, xyz_{gen.name}. Return the added variables in a\n dictionary.\n\n Parameters\n ----------\n recipe\n The recipe to add variables.\n\n con_name\n The name of the FitContribution where the PDFGenerator is in. If None, get the first contribution.\n Default None.\n\n gen_name\n The name of the PDFGenerator to constrain. If None, constrain the first generator in contribution.\n\n sg\n The space group. The expression can be number or name. If the structure is Crystal object, use internal\n constrain.\n\n dv\n The path to the .csv file contains the fitting results or the dictionary of values.\n If None, the following values will be used:\n type, initiate value, range, tag\n scale, 0, (0, inf), scale_{gen.name}\n delta2, 0, (0, inf), delta2_{gen.name}\n lat, par.value, (0, 2 * par.value), lat_{gen.name}\n adp, 0.05, (0, inf), adp_{gen.name}\n xyz, par.value, None, xyz_{gen.name}\n\n bounds\n The mapping from the name of the variable to the tuple of the arguments for the bounding function.\n\n add_xyz\n Whether to constrain xyz coordinates. If True, xyz will be added as fixed variable. Default True.\n\n Returns\n -------\n variables\n The dictionary mapping from the name of the variable to the variable itself.\n \"\"\"\n # initiate variables\n variables = dict()\n # the default of variables\n if dv is None:\n dv = dict()\n # the bounds\n if bounds is None:\n bounds = dict()\n # get FitContribution and PDFGenerator\n con: MyContribution = getattr(recipe, con_name)\n gen: Union[PDFGenerator, DebyePDFGenerator] = getattr(con, gen_name)\n # add scale\n name = f'scale_{gen.name}'\n variables[name] = recipe.addVar(gen.scale, name=name, value=dv.get(name, 0.)).boundRange(\n *bounds.get(name, (0., np.inf)))\n # add delta2\n name = f'delta2_{gen.name}'\n variables[name] = recipe.addVar(gen.delta2, name=name, value=dv.get(name, 0.)).boundRange(\n *bounds.get(name, (0., np.inf)))\n # constrain by spacegroup\n sgpars = get_sgpars(gen.phase, sg)\n # add latpars\n for par in sgpars.latpars:\n name = f'{par.name}_{gen.name}'\n variables[name] = recipe.addVar(par, name=name, value=dv.get(name, par.value),\n tag=f'lat_{gen.name}').boundRange(*bounds.get(name, (0., 2. * par.value)))\n # constrain adps\n atoms = gen.phase.getScatterers()\n elements = set([atom.element for atom in atoms])\n adp = dict()\n for element in elements:\n name = f'Biso_{only_alpha(element)}_{gen.name}'\n variables[name] = adp[element] = recipe.newVar(name, value=dv.get(name, 0.05),\n tag=f'adp_{gen.name}').boundRange(\n *bounds.get(name, (0, np.inf)))\n for atom in atoms:\n recipe.constrain(getattr(atom, 'Biso'), adp[atom.element])\n # add xyzpars\n if add_xyz:\n for par in sgpars.xyzpars:\n name = f'{par.name}_{gen.name}'\n variables[name] = recipe.addVar(par, name=name, value=dv.get(name, par.value),\n tag=f'xyz_{gen.name}', fixed=True).boundRange(\n *bounds.get(name, (-np.inf, np.inf)))\n return variables\n\n\ndef get_sgpars(parset: Union[ObjCrystCrystalParSet, DiffpyStructureParSet], sg: Union[int, str] = None):\n \"\"\"Constrain the structure by space group and get the independent parameters.\"\"\"\n if isinstance(parset, ObjCrystCrystalParSet):\n if sg is not None:\n print(\n \"ObjCrystCrystalParSet does not accept explicit space group constrain. 
\"\n \"Implicit space group is used.\"\n )\n sgpars = parset.sgpars\n elif isinstance(parset, DiffpyStructureParSet):\n if sg is None:\n sg = 'P1'\n print(\n \"No explicit space group for DiffpyStructureParSet. \"\n \"Use 'P1' symmetry.\"\n )\n sgpars = constrainAsSpaceGroup(\n parset, sg, constrainadps=False\n )\n else:\n raise ValueError(\n \"{} does not allow space group constrain.\".format(type(parset))\n )\n return sgpars\n\n\ndef sgconstrain_all(recipe: MyRecipe, dv: dict = None, bounds: dict = None) -> dict:\n \"\"\"Use space group to constrain all the generators in the recipe. See sgconstrain for details.\n\n Parameters\n ----------\n recipe : MyRecipe\n The recipe where variables will be added.\n\n dv : dict\n The keys are the names of variables and the values are the initial value for optimization.\n\n bounds : dict\n The keys are the names of variables and the keys are the tuple of start and end or single value for window.\n \"\"\"\n variables = dict()\n for con_name, con in recipe.contributions.items():\n for gen_name, gen in con.generators.items():\n if isinstance(gen, (DebyePDFGenerator, PDFGenerator)):\n variables.update(\n sgconstrain(\n recipe, con_name, gen_name, dv=dv, bounds=bounds\n )\n )\n return variables\n\n\ndef only_alpha(s: str):\n \"\"\"Remove all characters other than alphabets. Use to get a valid variable name.\"\"\"\n return ''.join((c for c in s if c.isalpha()))\n", "sub_path": "pdffitx/modeling/fitfuncs.py", "file_name": "fitfuncs.py", "file_ext": "py", "file_size_in_byte": 14288, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "fitobjs.MyParser", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 36, "usage_type": "name"}, {"api_name": "diffpy.srfit.fitbase.Profile", "line_number": 53, "usage_type": "call"}, {"api_name": "diffpy.srfit.fitbase.Profile", "line_number": 36, "usage_type": "name"}, {"api_name": "fitobjs.GenConfig", "line_number": 60, "usage_type": "name"}, {"api_name": "diffpy.srfit.pdf.DebyePDFGenerator", "line_number": 74, "usage_type": "call"}, {"api_name": "diffpy.srfit.pdf.PDFGenerator", "line_number": 74, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 78, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 60, "usage_type": "name"}, {"api_name": "diffpy.srfit.pdf.PDFGenerator", "line_number": 60, "usage_type": "name"}, {"api_name": "diffpy.srfit.pdf.DebyePDFGenerator", "line_number": 60, "usage_type": "name"}, {"api_name": "fitobjs.ConConfig", "line_number": 83, "usage_type": "name"}, {"api_name": "pdffitx.modeling.core.MyContribution", "line_number": 100, "usage_type": "call"}, {"api_name": "pdffitx.modeling.core.MyContribution", "line_number": 83, "usage_type": "name"}, {"api_name": "fitobjs.ConConfig", "line_number": 126, "usage_type": "name"}, {"api_name": "pdffitx.modeling.core.MyRecipe", "line_number": 140, "usage_type": "call"}, {"api_name": "pdffitx.modeling.core.MyRecipe", "line_number": 126, "usage_type": "name"}, {"api_name": "pdffitx.modeling.core.MyRecipe", "line_number": 151, "usage_type": "name"}, {"api_name": "scipy.optimize.least_squares", "line_number": 175, "usage_type": "call"}, {"api_name": "diffpy.srfit.fitbase.FitContribution", "line_number": 180, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 200, "usage_type": "name"}, {"api_name": "pdfstream.visualization.main.visualize", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.axes.Axes", "line_number": 180, "usage_type": "name"}, {"api_name": "pdffitx.modeling.core.MyRecipe", "line_number": 214, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 214, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 215, "usage_type": "name"}, {"api_name": "pdffitx.modeling.core.MyContribution", "line_number": 249, "usage_type": "name"}, {"api_name": "numpy.inf", "line_number": 263, "usage_type": "attribute"}, {"api_name": "diffpy.srfit.fitbase.parameter.ParameterProxy", "line_number": 215, "usage_type": "name"}, {"api_name": "pdffitx.modeling.core.MyRecipe", "line_number": 267, "usage_type": "name"}, {"api_name": "pdffitx.modeling.core.MyRecipe", "line_number": 277, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 277, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 278, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 278, "usage_type": "name"}, {"api_name": "pdffitx.modeling.core.MyContribution", "line_number": 334, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 335, "usage_type": "name"}, {"api_name": "diffpy.srfit.pdf.PDFGenerator", "line_number": 335, "usage_type": "name"}, {"api_name": "diffpy.srfit.pdf.DebyePDFGenerator", "line_number": 335, "usage_type": "name"}, {"api_name": "numpy.inf", "line_number": 339, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 343, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 359, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 368, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 279, "usage_type": "name"}, {"api_name": "diffpy.srfit.fitbase.parameter.ParameterProxy", "line_number": 279, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 372, "usage_type": "name"}, {"api_name": "diffpy.srfit.structure.objcrystparset.ObjCrystCrystalParSet", "line_number": 372, "usage_type": "name"}, {"api_name": "diffpy.srfit.structure.diffpyparset.DiffpyStructureParSet", "line_number": 372, "usage_type": "name"}, {"api_name": "diffpy.srfit.structure.objcrystparset.ObjCrystCrystalParSet", "line_number": 374, "usage_type": "argument"}, {"api_name": "diffpy.srfit.structure.diffpyparset.DiffpyStructureParSet", "line_number": 381, "usage_type": "argument"}, {"api_name": "diffpy.srfit.structure.sgconstraints.constrainAsSpaceGroup", "line_number": 388, "usage_type": "call"}, {"api_name": "pdffitx.modeling.core.MyRecipe", "line_number": 398, "usage_type": "name"}, {"api_name": "diffpy.srfit.pdf.DebyePDFGenerator", "line_number": 415, "usage_type": "name"}, {"api_name": "diffpy.srfit.pdf.PDFGenerator", "line_number": 415, "usage_type": "name"}]} +{"seq_id": "176089566", "text": "import re\nimport os\n\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request, HtmlResponse\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.utils.url import urljoin_rfc\nfrom urllib import urlencode\nimport hashlib\n\nimport csv\n\nfrom product_spiders.items import Product, ProductLoaderWithNameStrip\\\n as ProductLoader\nfrom scrapy import log\n\nHERE = 
os.path.abspath(os.path.dirname(__file__))\n\nclass HealthSpanSpider(BaseSpider):\n name = 'healthspan.co.uk-merckgroup'\n allowed_domains = ['www.healthspan.co.uk', 'healthspan.co.uk']\n start_urls = ('http://www.healthspan.co.uk/products/',)\n\n def parse(self, response):\n if not isinstance(response, HtmlResponse):\n return\n hxs = HtmlXPathSelector(response)\n\n # getting product links from A-Z product list\n links = hxs.select('//td[@class=\"itemL\"]/span/a/@href').extract()\n for prod_url in links:\n url = urljoin_rfc(get_base_url(response), prod_url)\n yield Request(url)\n\n # products\n for product in self.parse_product(response):\n yield product\n\n def parse_product(self, response):\n if not isinstance(response, HtmlResponse):\n return\n hxs = HtmlXPathSelector(response)\n\n name = hxs.select('//h1[@class=\"item\"]/span/text()').extract()\n if name:\n url = response.url\n url = urljoin_rfc(get_base_url(response), url)\n loader = ProductLoader(item=Product(), selector=hxs)\n loader.add_value('url', url)\n loader.add_value('name', name[0])\n\n items = hxs.select('//div[@class=\"sku-details\"]')\n for item in items:\n loader = ProductLoader(item=Product(), selector=hxs)\n loader.add_value('url', url)\n #loader.add_value('name', name[0])\n n = name[0].strip()\n sku = ''.join(item.select('.//span[@class=\"sku-description\"]//text()').extract())\n if sku:\n n += ' ' + sku.strip()\n\n loader.add_value('name', n)\n price = item.select('./span[@class=\"price\"]/text()').extract()\n if price:\n loader.add_value('price', price[0])\n else:\n price = item.select('./span[@class=\"special-price\"]/text()').extract()\n loader.add_value('price', price[0])\n yield loader.load_item()\n", "sub_path": "portfolio/Python/scrapy/merckgroup/healthspan.py", "file_name": "healthspan.py", "file_ext": "py", "file_size_in_byte": 2548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.abspath", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "scrapy.spider.BaseSpider", "line_number": 20, "usage_type": "name"}, {"api_name": "scrapy.http.HtmlResponse", "line_number": 26, "usage_type": "argument"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 28, "usage_type": "call"}, {"api_name": "scrapy.utils.url.urljoin_rfc", "line_number": 33, "usage_type": "call"}, {"api_name": "scrapy.utils.response.get_base_url", "line_number": 33, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 34, "usage_type": "call"}, {"api_name": "scrapy.http.HtmlResponse", "line_number": 41, "usage_type": "argument"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 43, "usage_type": "call"}, {"api_name": "scrapy.utils.url.urljoin_rfc", "line_number": 48, "usage_type": "call"}, {"api_name": "scrapy.utils.response.get_base_url", "line_number": 48, "usage_type": "call"}, {"api_name": "product_spiders.items.ProductLoaderWithNameStrip", "line_number": 49, "usage_type": "call"}, {"api_name": "product_spiders.items.Product", "line_number": 49, "usage_type": "call"}, {"api_name": "product_spiders.items.ProductLoaderWithNameStrip", "line_number": 55, "usage_type": "call"}, {"api_name": "product_spiders.items.Product", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "430077275", "text": "#!/usr/bin/env python\nfrom flask import Flask, render_template, session, 
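The healthspan spider above uses the legacy HtmlXPathSelector/urljoin_rfc API; the same two-stage crawl (follow the A-Z links, then extract one item per SKU block) reads more directly in current Scrapy idiom. A sketch under that assumption, not the portfolio's code:

import scrapy

class HealthspanModernSpider(scrapy.Spider):
    """Same crawl shape as the spider above, in modern Scrapy."""
    name = 'healthspan-modern'
    start_urls = ['http://www.healthspan.co.uk/products/']

    def parse(self, response):
        # Follow every product link from the A-Z list.
        for href in response.xpath('//td[@class="itemL"]/span/a/@href').getall():
            yield response.follow(href, callback=self.parse_product)

    def parse_product(self, response):
        name = response.xpath('//h1[@class="item"]/span/text()').get()
        if not name:
            return
        for sku in response.xpath('//div[@class="sku-details"]'):
            # Prefer the regular price, fall back to the special price.
            price = (sku.xpath('./span[@class="price"]/text()').get()
                     or sku.xpath('./span[@class="special-price"]/text()').get())
            suffix = ' '.join(
                sku.xpath('.//span[@class="sku-description"]//text()').getall()).strip()
            yield {'url': response.url,
                   'name': (name.strip() + ' ' + suffix).strip(),
                   'price': price}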
request, flash\nfrom flask_socketio import SocketIO, emit \nimport time\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\n\nsocketio = SocketIO(app)\n\n@app.route('/')\ndef index():\n return render_template('simple_joystick.html')\n\nif __name__ == '__main__':\n socketio.run(app, debug=True, host='0.0.0.0',port=5000)", "sub_path": "web_joystick.py", "file_name": "web_joystick.py", "file_ext": "py", "file_size_in_byte": 396, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask_socketio.SocketIO", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "441928481", "text": "from rest_framework import serializers, routers, viewsets, permissions\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.reverse import reverse\nfrom rest_framework import generics\n\nfrom .item_serializer import ItemSerializer\nfrom ..items.models import Category\n\nclass CategoryUrlField(serializers.HyperlinkedIdentityField):\n def get_url(self, obj, view_name, request, format):\n kwargs = {\n 'slug': obj.slug\n }\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n\n\nclass CategorySerializer(serializers.HyperlinkedModelSerializer):\n url = CategoryUrlField(view_name='category_retrieve_api')\n item_set = ItemSerializer(many=True, read_only=True)\n class Meta:\n model = Category\n fields = [\n 'id',\n 'url',\n 'slug',\n 'name',\n 'item_set'\n ]\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n\n# It just lists, for you to see. 
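web_joystick.py above wires up Flask-SocketIO but registers no socket events, so the joystick page has nothing to talk to. A hypothetical handler shows where one would go (the 'move' event name and payload shape are my assumptions, not the script's):

from flask import Flask
from flask_socketio import SocketIO, emit

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)

@socketio.on('move')               # hypothetical event emitted by the joystick page
def handle_move(data):
    x, y = data.get('x', 0), data.get('y', 0)
    print('joystick at', x, y)
    emit('ack', {'x': x, 'y': y})  # echo the position back to the emitting client

if __name__ == '__main__':
    socketio.run(app, host='0.0.0.0', port=5000)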
Can't POST or DELETE\nclass CategoryListAPIView(generics.ListAPIView):\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n paginate_by = 5\n\n\nclass CategoryCreateSerializer(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = ('name', 'slug')\n\n\nclass CategoryCreateAPIView(generics.CreateAPIView):\n serializer_class = CategorySerializer\n fields = ('name', 'slug')\n\n# GET only\nclass CategoryRetrieveAPIView(generics.RetrieveAPIView):\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n\n def get_object(self):\n slug = self.kwargs.pop(\"slug\")\n obj = get_object_or_404(Category, slug=slug)\n return obj\n", "sub_path": "ondernemer/api/category_serializer.py", "file_name": "category_serializer.py", "file_ext": "py", "file_size_in_byte": 1996, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "rest_framework.serializers.HyperlinkedIdentityField", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 15, "usage_type": "call"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 18, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 18, "usage_type": "name"}, {"api_name": "item_serializer.ItemSerializer", "line_number": 20, "usage_type": "call"}, {"api_name": "items.models.Category", "line_number": 22, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 32, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 32, "usage_type": "name"}, {"api_name": "items.models.Category.objects.all", "line_number": 33, "usage_type": "call"}, {"api_name": "items.models.Category.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "items.models.Category", "line_number": 33, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 36, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 36, "usage_type": "name"}, {"api_name": "items.models.Category.objects.all", "line_number": 37, "usage_type": "call"}, {"api_name": "items.models.Category.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "items.models.Category", "line_number": 37, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 41, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 41, "usage_type": "name"}, {"api_name": "items.models.Category.objects.all", "line_number": 42, "usage_type": "call"}, {"api_name": "items.models.Category.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "items.models.Category", "line_number": 42, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 47, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 47, "usage_type": "name"}, {"api_name": "items.models.Category", "line_number": 49, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 53, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveAPIView", "line_number": 58, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 58, 
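CategoryUrlField in the record above reverses a view named 'category_retrieve_api' with a slug kwarg, so the urlconf must expose a matching slug-based route. A sketch of what that wiring plausibly looks like (the module path and the other route names are guesses; only 'category_retrieve_api' is fixed by the serializer):

from django.urls import path

from .category_serializer import (CategoryListAPIView, CategoryCreateAPIView,
                                  CategoryRetrieveAPIView)

# The slug route must be named 'category_retrieve_api' so that
# CategoryUrlField.get_url() can reverse it with kwargs={'slug': obj.slug}.
urlpatterns = [
    path('categories/', CategoryListAPIView.as_view(), name='category_list_api'),
    path('categories/create/', CategoryCreateAPIView.as_view(), name='category_create_api'),
    path('categories/<slug:slug>/', CategoryRetrieveAPIView.as_view(),
         name='category_retrieve_api'),
]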
"usage_type": "name"}, {"api_name": "items.models.Category.objects.all", "line_number": 59, "usage_type": "call"}, {"api_name": "items.models.Category.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "items.models.Category", "line_number": 59, "usage_type": "name"}, {"api_name": "rest_framework.generics.get_object_or_404", "line_number": 64, "usage_type": "call"}, {"api_name": "items.models.Category", "line_number": 64, "usage_type": "argument"}]} +{"seq_id": "104411663", "text": "# 队列\n# 生产者消费者模型\n# 买包子案例\n\nimport time\nimport random\nfrom multiprocessing import Process, Queue, JoinableQueue\n\n\n# 生产者\ndef producer(name, food, q):\n for i in range(3):\n time.sleep(random.randint(1, 2))\n f = \"%s生产了%s%s\" % (name, food, i)\n print(f)\n q.put(f)\n q.join() # 阻塞直到一个队列中的所有数据处理完毕 感知一个队里中的数据,是否全部被执行完毕\n\n\n# 消费者\ndef consumer(q, name):\n while True:\n food = q.get()\n f = '\\033[31m%s消费了%s\\033[0m' % (name, food)\n print(f)\n time.sleep(random.randint(1, 2))\n q.task_done()\n\n\nif __name__ == \"__main__\":\n q = JoinableQueue(20)\n p = Process(target=producer, args=(\"Egon\", \"包子\", q))\n p.start()\n\n p1 = Process(target=producer, args=(\"king\", \"饺子\", q))\n p1.start()\n\n c = Process(target=consumer, args=(q, \"ban\"))\n c.daemon = True # 守护进程\n c.start()\n\n c1 = Process(target=consumer, args=(q, \"jiang\"))\n c1.daemon = True\n c1.start()\n\n p.join()\n p1.join() # 感知一个进程的是否运行完\n # q.put(None)\n # q.put(None)\n\n# 在消费者这段\n # 在每次获取一个数据\n # 处理一个数据\n # 发送一个记号:标记一个数据被处理\n\n# 在生产者这一端:\n # 每一次生产一个数据\n # 且每一次生产的数据都放在队列中\n # 在队列中刻上一个记号\n # 当生产者全部生产完毕之后\n # join信号:已经停止生产数据了\n # 且要等待之前被刻上的记号都要被消费完\n # 当数据都被处理完时,join阻塞结束\n\n\n", "sub_path": "多线程/生产者消费者模型.py", "file_name": "生产者消费者模型.py", "file_ext": "py", "file_size_in_byte": 1712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 13, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 26, "usage_type": "call"}, {"api_name": "multiprocessing.JoinableQueue", "line_number": 31, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 32, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 35, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 38, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "39604166", "text": "import json\nfrom bson import json_util\nfrom bson.objectid import ObjectId\nfrom flask import jsonify\nfrom DAL.databases import mongoDB\n\n\ndef insert_award(awards_object):\n try:\n res = mongoDB.awards_collection.insert(awards_object)\n except Exception as error:\n return error\n else:\n return str(res)\n\n\ndef delete_award(user_id):\n result_award = mongoDB.awards_collection.find({'userId': ObjectId(user_id)})\n res = json.loads(json_util.dumps(result_award))\n id_award = res[0]['_id']['$oid']\n if mongoDB.awards_collection.delete_one({\"_id\": ObjectId(id_award)}):\n return True\n else:\n return False\n\n\ndef update_awards(user_id, params):\n try:\n response = mongoDB.awards_collection.update({\"userId\": ObjectId(user_id)}, {\"$set\": params})\n except Exception as error:\n return error\n else:\n if response:\n return True\n else:\n return False\n\n\ndef find_user_awards(user_id):\n try:\n result_award = 
mongoDB.awards_collection.find({'userId': ObjectId(user_id)})\n res = json.loads(json_util.dumps(result_award))\n\n except Exception as error:\n return error\n else:\n return jsonify(res[0])\n", "sub_path": "account-service/controllers/awards_controllers.py", "file_name": "awards_controllers.py", "file_ext": "py", "file_size_in_byte": 1221, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "DAL.databases.mongoDB.awards_collection.insert", "line_number": 10, "usage_type": "call"}, {"api_name": "DAL.databases.mongoDB.awards_collection", "line_number": 10, "usage_type": "attribute"}, {"api_name": "DAL.databases.mongoDB", "line_number": 10, "usage_type": "name"}, {"api_name": "DAL.databases.mongoDB.awards_collection.find", "line_number": 18, "usage_type": "call"}, {"api_name": "DAL.databases.mongoDB.awards_collection", "line_number": 18, "usage_type": "attribute"}, {"api_name": "DAL.databases.mongoDB", "line_number": 18, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 18, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "bson.json_util.dumps", "line_number": 19, "usage_type": "call"}, {"api_name": "bson.json_util", "line_number": 19, "usage_type": "name"}, {"api_name": "DAL.databases.mongoDB.awards_collection.delete_one", "line_number": 21, "usage_type": "call"}, {"api_name": "DAL.databases.mongoDB.awards_collection", "line_number": 21, "usage_type": "attribute"}, {"api_name": "DAL.databases.mongoDB", "line_number": 21, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 21, "usage_type": "call"}, {"api_name": "DAL.databases.mongoDB.awards_collection.update", "line_number": 29, "usage_type": "call"}, {"api_name": "DAL.databases.mongoDB.awards_collection", "line_number": 29, "usage_type": "attribute"}, {"api_name": "DAL.databases.mongoDB", "line_number": 29, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 29, "usage_type": "call"}, {"api_name": "DAL.databases.mongoDB.awards_collection.find", "line_number": 41, "usage_type": "call"}, {"api_name": "DAL.databases.mongoDB.awards_collection", "line_number": 41, "usage_type": "attribute"}, {"api_name": "DAL.databases.mongoDB", "line_number": 41, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 41, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "bson.json_util.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "bson.json_util", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "280138818", "text": "#!/usr/bin/env python3\n# encoding: utf-8\n\"\"\"\n@author: vo4f\n@project: PyCharm\n@file: utils.py\n@time: 2017/10/25 15:54\n@doc: Some usefully functions\n\"\"\"\nimport numpy as np\nimport calculator\nimport matplotlib.pyplot as plt\n\n\ndef init_center(data, num_clusters):\n \"\"\"\n Init the raw data set, random choose centroid\n :param data: np array\n :param num_clusters: number of clusters\n :return: centroid list\n \"\"\"\n tmp = data.tolist()\n np.random.shuffle(tmp)\n return tmp[:num_clusters]\n\n\ndef isconverged(centroid1, centroid2):\n \"\"\"\n Check the KMeans is converged or not\n :param centroid1: old centroids\n :param centroid2: current centroids\n :return: the boolean\n \"\"\"\n return np.array_equal(centroid1, centroid2)\n\n\ndef calc_dist_array(data):\n m 
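The awards controllers above repeatedly round-trip query results through json_util.dumps and json.loads to strip BSON types (ObjectId and friends) before jsonify. That helper is worth isolating; a sketch against a stubbed document:

import json
from bson import json_util
from bson.objectid import ObjectId

def to_json_safe(document):
    """Round-trip through bson.json_util so ObjectId and other BSON
    types become plain JSON (the pattern used in find_user_awards)."""
    return json.loads(json_util.dumps(document))

doc = {'_id': ObjectId('507f1f77bcf86cd799439011'), 'points': 42}
print(to_json_safe(doc))   # {'_id': {'$oid': '507f1f77bcf86cd799439011'}, 'points': 42}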
= data.shape[0]\n dist_array = np.ones((m, m), dtype=float)\n for i in range(m):\n for j in range(m):\n dist_array[i, j] = calculator.dist_euclidean(data[i], data[j])\n # with open('dist_array.data', 'w') as f:\n # for i in range(m):\n # words = ''\n # for j in range(m):\n # words += str(dist_array[i, j]) + ','\n # f.writelines(words + '\\n')\n return dist_array\n\n\ndef plot_points(data):\n # x = data[:, 0]\n # y = data[:, 1]\n # fig = plt.figure()\n # ax1 = fig.add_subplot(111)\n # ax1.set_title('data')\n # plt.xlabel('x')\n # plt.ylabel('y')\n # ax1.scatter(x, y, c='r', marker='.')\n # plt.legend('x1')\n # plt.show()\n fig, ax = plt.subplots()\n colors = ['r', 'g', 'b', 'y', 'c']\n markers = ['o', 'v', 'x', '*', 's']\n for n, i in enumerate(data):\n x = i[:, 0]\n y = i[:, 1]\n ax.scatter(x, y, color=colors[n], marker=markers[n])\n plt.show()\n\n\ndef plot_raw(data):\n x = data[:, 0]\n y = data[:, 1]\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.set_title('data')\n plt.xlabel('x')\n plt.ylabel('y')\n ax1.scatter(x, y, c='r', marker='.')\n plt.legend('x1')\n plt.show()\n\n\ndef divide_data(data):\n \"\"\"\n divide the data to count it's values\n :param data:\n :return:\n \"\"\"\n count = {}\n for i in data:\n count[str(i)] = count.get(str(i), 0) + 1\n return count\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.random.shuffle", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.array_equal", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 39, "usage_type": "call"}, {"api_name": "calculator.dist_euclidean", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "378499479", "text": "from django.conf.urls import patterns, include, url\nfrom django.conf import settings\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n (r'^admin/', include('admin.urls')),\n (r'^supervise/', include('supervise.urls')),\n (r'^auth/', include('auth.urls')),\n (r'^module/', include('module.urls')),\n (r'^dict/', include('dict.urls')),\n (r'^soft_enter/', include('soft_enter.urls')),\n (r'^advanced_student/', 
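calc_dist_array in the record above fills the full m×m distance matrix with a double loop, computing each symmetric pair twice; NumPy broadcasting yields the same matrix in one shot. A sketch with a stand-in for calculator.dist_euclidean:

import numpy as np

def pairwise_euclidean(data):
    """Vectorized equivalent of the double loop in calc_dist_array."""
    diff = data[:, None, :] - data[None, :, :]     # shape (m, m, d)
    return np.sqrt((diff ** 2).sum(axis=-1))

pts = np.array([[0.0, 0.0], [3.0, 4.0], [6.0, 8.0]])
d = pairwise_euclidean(pts)
assert d[0, 1] == 5.0 and np.allclose(d, d.T)      # symmetric, zero diagonal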
include('advanced_student.urls')),\n (r'^pay/', include('payment.urls')),\n (r'^people/', include('people.urls')),\n (r'^example/', include('example.urls')),\n (r'^supervise/$', 'auth.views.login',),\n (r'^gct/', include('gct.urls')),\n\n (r'^$', 'gsprodegree.views.jump',),\n (r'^supervise/home/$', 'gsprodegree.views.home',),\n (r'^password/$', 'gsprodegree.views.password',),\n)\n\nurlpatterns += patterns('',\n (r'^site_media/(?P<path>.*)$', 'django.views.static.serve', \n {'document_root': settings.MEDIA_ROOT}),\n)\n", "sub_path": "gsprodegree/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1051, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.patterns", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "247888455", "text": "from datetime import datetime, timedelta\nfrom random import choice\n\nfrom flask_restful import Resource\nfrom flasgger import swag_from\nfrom flask import Response, jsonify, abort, request, g\nfrom flask_jwt_extended import jwt_required\n\nfrom docs.solve import SOLVE_GET, SOLVE_POST\nfrom model.game import GameModel\nfrom model.problem import ProblemModel\nfrom model.booth import BoothModel\nfrom util import set_g_object\n\n\nclass SolveView(Resource):\n\n def _check_time(self, game: GameModel):\n now: datetime = datetime.now()\n if now < game.start_time:\n abort(406)\n if game.end_time <= now:\n abort(412)\n\n @swag_from(SOLVE_GET)\n @jwt_required\n @set_g_object\n def get(self, boothName: str) -> Response:\n\n self._check_time(g.game)\n\n booth: BoothModel = BoothModel.objects(booth_name=boothName).first()\n if not booth:\n return Response('', 204)\n\n if booth.own_team == g.user.team:\n return Response('', 205)\n\n if booth.next_capture_time > datetime.now():\n abort(408)\n\n problem: ProblemModel = choice(ProblemModel.objects())\n\n response = {'boothName': boothName,\n 'problemId': problem.problem_id,\n 'content': problem.content,\n 'choices': problem.choices}\n\n return jsonify(response)\n\n @swag_from(SOLVE_POST)\n @jwt_required\n @set_g_object\n def post(self, boothName: str) -> Response:\n\n self._check_time(g.game)\n\n payload: dict = request.json\n\n problem: ProblemModel = ProblemModel.objects(problem_id=payload['problemId']).first()\n booth: BoothModel = 
BoothModel.objects(booth_name=boothName).first()\n if not all((problem, booth)):\n return Response('', 204)\n\n if booth.next_capture_time > datetime.now():\n abort(408)\n\n if payload['answer'] != problem.answer:\n return Response('', 205)\n\n booth.own_team = g.user.team\n booth.next_capture_time = datetime.now() + timedelta(minutes=1)\n booth.save()\n\n return Response('', 201)\n", "sub_path": "Server/view/solve.py", "file_name": "solve.py", "file_ext": "py", "file_size_in_byte": 2131, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask_restful.Resource", "line_number": 16, "usage_type": "name"}, {"api_name": "model.game.GameModel", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.g.game", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 30, "usage_type": "name"}, {"api_name": "model.booth.BoothModel", "line_number": 32, "usage_type": "name"}, {"api_name": "model.booth.BoothModel.objects", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.g.user", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 40, "usage_type": "call"}, {"api_name": "model.problem.ProblemModel", "line_number": 42, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 42, "usage_type": "call"}, {"api_name": "model.problem.ProblemModel.objects", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "flasgger.swag_from", "line_number": 25, "usage_type": "call"}, {"api_name": "docs.solve.SOLVE_GET", "line_number": 25, "usage_type": "argument"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 26, "usage_type": "name"}, {"api_name": "util.set_g_object", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.g.game", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "model.problem.ProblemModel", "line_number": 60, "usage_type": "name"}, {"api_name": "model.problem.ProblemModel.objects", "line_number": 60, "usage_type": "call"}, {"api_name": "model.booth.BoothModel", "line_number": 61, "usage_type": "name"}, {"api_name": "model.booth.BoothModel.objects", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 66, "usage_type": "call"}, 
{"api_name": "flask.Response", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.g.user", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 71, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 72, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 75, "usage_type": "call"}, {"api_name": "flasgger.swag_from", "line_number": 51, "usage_type": "call"}, {"api_name": "docs.solve.SOLVE_POST", "line_number": 51, "usage_type": "argument"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 52, "usage_type": "name"}, {"api_name": "util.set_g_object", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "541335762", "text": "#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom forms import *\nfrom flask_migrate import Migrate # added for the migration\nimport datetime\nfrom sqlalchemy import func # Ensure it is case-insensitive the search term.\n\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\napp = Flask(__name__)\nmoment = Moment(app)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\n\n# TODO: connect to a local postgresql database\n\nmigrate = Migrate(app, db)\n\n#----------------------------------------------------------------------------#\n# Models.\n#----------------------------------------------------------------------------#\n\n\nclass Shows(db.Model):\n __tablename__ = 'shows'\n artist_id = db.Column(db.Integer(), db.ForeignKey('artist.id'),primary_key=True)\n venue_id = db.Column(db.Integer(), db.ForeignKey('venue.id'),primary_key=True)\n start_date = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n\n\nclass Venue(db.Model):\n __tablename__ = 'venue'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n city = db.Column(db.String(120))\n state = db.Column(db.String(120))\n address = db.Column(db.String(120))\n phone = db.Column(db.String(120))\n image_link = db.Column(db.String(500))\n facebook_link = db.Column(db.String(120))\n genres = db.Column(db.String(120))\n website = db.Column(db.String(120))\n seeking_description = db.Column(db.String(120))\n\n\n def __repr__(self):\n return f\"\\n\"\n\n\n # TODO: implement any missing fields, as a database migration using Flask-Migrate\n\nclass Artist(db.Model):\n __tablename__ = 'artist'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n city = db.Column(db.String(120))\n state = db.Column(db.String(120))\n phone = db.Column(db.String(120))\n genres = db.Column(db.String(120))\n image_link = db.Column(db.String(500))\n facebook_link = db.Column(db.String(120))\n website = db.Column(db.String(120))\n seeking_description = db.Column(db.String(120))\n\n venues = db.relationship('Venue', secondary='shows', 
backref=db.backref('artists', lazy=True))\n\n def __repr__(self):\n return f\"<Artist {self.id} {self.name}>\\n\"\n\n\n # TODO: implement any missing fields, as a database migration using Flask-Migrate\n\n# TODO Implement Show and Artist models, and complete all model relationships and properties, as a database migration.\n\n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\n\ndef format_datetime(value, format='medium'):\n date = dateutil.parser.parse(value)\n if format == 'full':\n format=\"EEEE MMMM, d, y 'at' h:mma\"\n elif format == 'medium':\n format=\"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format)\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n#----------------------------------------------------------------------------#\n# Controllers.\n#----------------------------------------------------------------------------#\n\n@app.route('/')\ndef index():\n return render_template('pages/home.html')\n\n\n# Venues\n# ----------------------------------------------------------------\n\n@app.route('/venues')\ndef venues():\n data = []\n allVenue = db.session.execute('select DISTINCT city, state from venue;').fetchall()\n \n for v in allVenue: \n mydata = {}\n mydata['city'] = v.city\n mydata['state'] = v.state\n mydata['venues'] = []\n venues = Venue.query.filter_by(state=v.state,city=v.city).all()\n for venue in venues:\n myVenue = {}\n myVenue[\"id\"] = venue.id\n myVenue[\"name\"] = venue.name\n myVenue[\"num_upcoming_shows\"] = db.session.execute('select count(venue_id) from shows where venue_id='+ str(venue.id) \n +' and start_date > CURRENT_TIMESTAMP;').fetchone()[0]\n mydata['venues'].append(myVenue)\n data.append(mydata)\n\n return render_template('pages/venues.html', areas=data)\n\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n\n search_word = request.form.get('search_term', '')\n resp = Venue.query.filter(func.lower(Venue.name).contains( search_word.lower() )).all()\n response = {}\n response['count'] = len(resp)\n response['data'] = [] \n for d in resp: \n myVenue = {}\n myVenue[\"id\"] = d.id\n myVenue[\"name\"] = d.name\n myVenue[\"num_upcoming_shows\"] = db.session.execute('select count(venue_id) from shows where venue_id='+ str(d.id) \n +' and start_date > CURRENT_TIMESTAMP;').fetchone()[0]\n response['data'].append(myVenue)\n\n return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/venues/<int:venue_id>')\ndef show_venue(venue_id):\n venue = Venue.query.get(venue_id)\n if not venue :\n form = VenueForm()\n flash('Venue of id ' + str(venue_id) + ' does not exist. Create a NEW Venue here!')\n return render_template('forms/new_venue.html', form=form)\n\n\n mydata = {}\n mydata['id'] = venue.id\n mydata['name'] = venue.name\n mydata['genres'] = venue.genres\n mydata['address'] = venue.address\n mydata['city'] = venue.city\n mydata['state'] = venue.state\n mydata['phone'] = venue.phone\n mydata['website'] = venue.website\n mydata['facebook_link'] = venue.facebook_link\n mydata['seeking_description'] = venue.seeking_description\n mydata['image_link'] = venue.image_link\n mydata['upcoming_shows'] = []\n mydata['past_shows'] = []\n pastShows = db.session.execute('select artist_id,start_date from shows where venue_id='+ str(venue_id) +' and start_date < CURRENT_TIMESTAMP;').fetchall()\n upShows = db.session.execute('select artist_id,start_date from shows where venue_id='+ 
str(venue_id) +' and start_date > CURRENT_TIMESTAMP;').fetchall()\n \n for past_show in pastShows:\n ps = {}\n ps['artist_id'] = past_show['artist_id']\n ps['artist_name'] = Artist.query.get(past_show['artist_id']).name\n ps['artist_image_link'] = Artist.query.get(past_show['artist_id']).image_link\n ps['start_time'] = past_show['start_date']\n mydata['past_shows'].append(ps)\n\n for up_artist_id in upShows: \n ups = {}\n ups['artist_id'] = up_artist_id['artist_id']\n ups['artist_name'] = Artist.query.get(up_artist_id['artist_id']).name\n ups['artist_image_link'] = Artist.query.get(up_artist_id['artist_id']).image_link\n ups['start_time'] = up_artist_id['start_date']\n mydata['upcoming_shows'].append(ups) \n\n mydata['past_shows_count'] = len(pastShows)\n mydata['upcoming_shows_count'] = len(upShows)\n\n return render_template('pages/show_venue.html', venue=mydata)\n\n# Create Venue\n# ----------------------------------------------------------------\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n form = VenueForm()\n return render_template('forms/new_venue.html', form=form)\n\n@app.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n\n newVenue = Venue(name=request.form['name'],city=request.form['city'],state=request.form['state'],\n address=request.form['address'],phone=request.form['phone'],genres=request.form['genres'],\n facebook_link=request.form['facebook_link'])\n\n try:\n db.session.add(newVenue)\n db.session.commit()\n flash('Venue ' + request.form['name'] + ' was successfully listed!')\n except:\n db.session.rollback()\n flash('An error occurred. Venue ' + request.form['name'] + ' could not be listed.')\n \n return render_template('pages/home.html')\n\n\n@app.route('/venues/<venue_id>', methods=['DELETE'])\ndef delete_venue(venue_id):\n if not Venue.query.get(venue_id) :\n flash('Venue of id ' + str(venue_id) + ' does not exist, so it cannot be deleted')\n return render_template('pages/home.html')\n\n try:\n Venue.query.filter_by(id=venue_id).delete()\n db.session.commit()\n except:\n db.session.rollback()\n \n return render_template('pages/home.html')\n\n# Artists\n# ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n # TODO: replace with real data returned from querying the database\n data = Artist.query.all()\n\n return render_template('pages/artists.html', artists=data)\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n # TODO: implement search on artists with partial string search. 
Ensure it is case-insensitive.\n # search for \"A\" should return \"Guns N Petals\", \"Matt Quevado\", and \"The Wild Sax Band\".\n # search for \"band\" should return \"The Wild Sax Band\".\n\n search_word = request.form.get('search_term', '')\n \n resp = Artist.query.filter(func.lower(Artist.name).contains(search_word.lower())).all()\n response = {}\n response['count'] = len(resp)\n response['data'] = [] \n for d in resp: \n myArtist = {}\n myArtist[\"id\"] = d.id\n myArtist[\"name\"] = d.name\n myArtist[\"num_upcoming_shows\"] = db.session.execute('select count(artist_id) from shows where artist_id='+ str(d.id) +' and start_date > CURRENT_TIMESTAMP;').fetchone()[0]\n \n response['data'].append(myArtist)\n\n return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/artists/<int:artist_id>')\ndef show_artist(artist_id):\n # shows the artist page with the given artist_id\n # TODO: replace with real artist data from the artist table, using artist_id\n\n artist = Artist.query.get(artist_id)\n if not artist :\n form = ArtistForm()\n flash('Artist of id ' + str(artist_id) + ' does not exist. Create a NEW Artist here!')\n return render_template('forms/new_artist.html', form=form)\n\n mydata = {}\n mydata['id'] = artist.id\n mydata['name'] = artist.name\n if artist.genres : mydata['genres'] = artist.genres\n mydata['city'] = artist.city\n mydata['state'] = artist.state\n mydata['phone'] = artist.phone\n # mydata['website'] = artist.website\n mydata['facebook_link'] = artist.facebook_link\n if artist.seeking_description:\n mydata['seeking_talent'] = True\n mydata['seeking_description'] = artist.seeking_description\n\n mydata['image_link'] = artist.image_link\n mydata['upcoming_shows'] = []\n mydata['past_shows'] = []\n pastShows = db.session.execute('select venue_id, start_date from shows where artist_id='+ str(artist_id) +' and start_date < CURRENT_TIMESTAMP;').fetchall()\n upShows = db.session.execute('select venue_id, start_date from shows where artist_id='+ str(artist_id) +' and start_date > CURRENT_TIMESTAMP;').fetchall()\n\n for past_show in pastShows:\n ps = {}\n ps['venue_id'] = past_show['venue_id']\n ps['venue_name'] = Venue.query.get(past_show['venue_id']).name\n ps['venue_image_link'] = Venue.query.get(past_show['venue_id']).image_link\n ps['start_time'] = past_show['start_date']\n mydata['past_shows'].append(ps)\n\n for up_venue_id in upShows: \n ups = {}\n ups['venue_id'] = up_venue_id['venue_id']\n ups['venue_name'] = Venue.query.get(up_venue_id['venue_id']).name\n ups['venue_image_link'] = Venue.query.get(up_venue_id['venue_id']).image_link\n ups['start_time'] = up_venue_id['start_date']\n mydata['upcoming_shows'].append(ups) \n\n mydata['past_shows_count'] = len(pastShows)\n mydata['upcoming_shows_count'] = len(upShows)\n \n return render_template('pages/show_artist.html', artist=mydata)\n\n# Update\n# ----------------------------------------------------------------\n@app.route('/artists/<int:artist_id>/edit', methods=['GET'])\ndef edit_artist(artist_id):\n form = ArtistForm()\n\n artist = Artist.query.get(artist_id)\n if not artist :\n flash('Artist of id ' + str(artist_id) + ' does not exist. Create a NEW Artist here!')\n return render_template('forms/new_artist.html', form=form)\n\n return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n\n@app.route('/artists/<int:artist_id>/edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n # artist record with ID <artist_id> using the new attributes\n\n artist = Artist.query.get(artist_id)\n 
artist.name = request.form['name']\n artist.genres = request.form['genres']\n artist.city = request.form['city']\n artist.state = request.form['state']\n artist.phone = request.form['phone']\n # artist.website = request.form['website']\n artist.facebook_link = request.form['facebook_link']\n # artist.image_link = request.form['image_link']\n try:\n db.session.add(artist)\n db.session.commit()\n flash('Artist ' + request.form['name'] + ' was successfully updated!')\n except:\n db.session.rollback()\n flash('An error occurred. Artist ' + artist.name + ' could not be updated.')\n\n return redirect(url_for('show_artist', artist_id=artist_id))\n\n\n@app.route('/venues/<int:venue_id>/edit', methods=['GET'])\ndef edit_venue(venue_id):\n form = VenueForm()\n\n venue = Venue.query.get(venue_id)\n if not venue :\n flash('Venue of id ' + str(venue_id) + ' does not exist. Create a NEW Venue here!')\n return render_template('forms/new_venue.html', form=form)\n\n # TODO: populate form with values from venue with ID <venue_id>\n return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n@app.route('/venues/<int:venue_id>/edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n # venue record with ID <venue_id> using the new attributes\n venue = Venue.query.get(venue_id)\n venue.name = request.form['name']\n venue.genres = request.form['genres']\n venue.city = request.form['city']\n venue.address = request.form['address']\n venue.state = request.form['state']\n venue.phone = request.form['phone']\n # venue.website = request.form['website']\n venue.facebook_link = request.form['facebook_link']\n # venue.image_link = request.form['image_link']\n\n try:\n db.session.add(venue)\n db.session.commit()\n flash('Venue ' + request.form['name'] + ' was successfully updated!')\n except:\n db.session.rollback()\n flash('An error occurred. Venue ' + venue.name + ' could not be updated.')\n\n return redirect(url_for('show_venue', venue_id=venue_id))\n\n# Create Artist\n# ----------------------------------------------------------------\n\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n form = ArtistForm()\n return render_template('forms/new_artist.html', form=form)\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n\n newArtist = Artist(name=request.form['name'],city=request.form['city'],state=request.form['state'],\n phone=request.form['phone'],genres=request.form['genres'],\n facebook_link=request.form['facebook_link'])\n try:\n db.session.add(newArtist)\n db.session.commit()\n flash('Artist ' + request.form['name'] + ' was successfully listed!')\n except:\n db.session.rollback()\n flash('An error occurred. Artist ' + newArtist.name + ' could not be listed.')\n \n return render_template('pages/home.html')\n\n\n# Shows\n# ----------------------------------------------------------------\n\n@app.route('/shows')\ndef shows():\n data = []\n shows_ = db.session.execute('select venue_id, artist_id, start_date from shows;').fetchall()\n for show in shows_:\n myShow = {}\n myShow['venue_id'] = show.venue_id\n myShow['venue_name'] = Venue.query.get(show.venue_id).name\n myShow['artist_id'] = show.artist_id\n myShow['artist_name'] = Artist.query.get(show.artist_id).name\n myShow['artist_image_link'] = Artist.query.get(show.artist_id).image_link\n myShow['start_time'] = show.start_date\n data.append(myShow)\n \n return render_template('pages/shows.html', shows=data)\n\n@app.route('/shows/create')\ndef create_shows():\n # renders form. 
do not touch.\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n try:\n # on successful db insert, flash success\n artist_id = request.form['artist_id']\n venue_id = request.form['venue_id']\n start_date = request.form['start_time']\n db.session.execute(\"INSERT INTO shows (artist_id, venue_id, start_date) VALUES(\"+ artist_id +\",\"+ venue_id +\",'\"+ start_date +\"');\")\n db.session.commit()\n flash('Show was successfully listed!')\n except:\n db.session.rollback()\n flash('An error occurred. Show could not be listed.')\n return render_template('pages/home.html')\n\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# Launch.\n#----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n", "sub_path": "projects/01_fyyur/starter_code/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 17923, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.Flask", "line_number": 23, "usage_type": "call"}, {"api_name": "flask_moment.Moment", "line_number": 24, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 26, "usage_type": "call"}, {"api_name": "flask_migrate.Migrate", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "attribute"}, {"api_name": "dateutil.parser.parser.parse", "line_number": 95, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 95, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 95, "usage_type": "name"}, {"api_name": "babel.dates.format_datetime", "line_number": 100, "usage_type": "call"}, {"api_name": "babel.dates", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 136, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "sqlalchemy.func.lower", "line_number": 142, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 154, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 154, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 
161, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 201, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 209, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 214, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 214, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 215, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 215, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 216, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 216, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 221, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 221, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 221, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 224, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 224, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 224, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 226, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 232, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 233, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 241, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 250, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 258, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 258, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 258, "usage_type": "name"}, {"api_name": "sqlalchemy.func.lower", "line_number": 260, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 260, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 272, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 272, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 272, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 272, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 282, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 283, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 323, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 333, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 334, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 336, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 344, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 344, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 345, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 345, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 346, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 346, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 347, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 347, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 348, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 348, "usage_type": "name"}, 
{"api_name": "flask.request.form", "line_number": 350, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 350, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 355, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 355, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 355, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 358, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 360, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 360, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 369, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 370, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 373, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 379, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 379, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 380, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 380, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 381, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 381, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 382, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 382, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 383, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 383, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 384, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 384, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 386, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 386, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 392, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 392, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 392, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 395, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 397, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 397, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 405, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 410, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 410, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 411, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 411, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 412, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 412, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 416, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 416, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 416, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 419, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 421, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 441, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 447, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 453, "usage_type": "attribute"}, 
{"api_name": "flask.request", "line_number": 453, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 454, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 454, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 455, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 455, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 458, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 461, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 462, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 467, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 471, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 475, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 477, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 479, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 480, "usage_type": "attribute"}]} +{"seq_id": "567082594", "text": "#!/usr/bin/env python\n\"\"\"\n| Copyright (C) 2020 Johannes Schlatow\n| TU Braunschweig, Germany\n| All rights reserved.\n| See LICENSE file for copyright and license details.\n\n:Authors:\n - Johannes Schlatow\n\nDescription\n-----------\n\n\"\"\"\n\nimport argparse\nfrom eval import generated\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom pandas.api.types import CategoricalDtype\n\nparser = argparse.ArgumentParser(description='Print statistics.')\nparser.add_argument('folders', type=str, nargs='+',\n help=\"Folder containing the subfolders with different analysis results in separate results.csv files.\")\nparser.add_argument('--width', type=float, default=10)\nparser.add_argument('--height', type=float, default=4)\nparser.add_argument('--titles', type=str, nargs='*', default=None)\nparser.add_argument('--order', type=str, nargs='*', default=list())\nparser.add_argument('--xlabel', type=str, default=None)\nparser.add_argument('--ylabel', type=str)\nparser.add_argument('--output', default=None, required=False,\n help='save plot to given file')\n\nargs = parser.parse_args()\n\nif __name__ == \"__main__\":\n sns.set(style=\"darkgrid\", palette=\"muted\")\n cattype = CategoricalDtype(categories=['SCHED', 'UNSCHED', 'TIMEOUT'], ordered=True)\n\n df = []\n for folder in args.folders:\n data = generated.SchedulabilityData(folder=folder)\n data.filter_out('Branching', value=3)\n df.append(data.unpivot_time())\n df[-1]['Result'] = df[-1]['Result'].astype(cattype)\n if args.order:\n df[-1]['Analysis'] = df[-1]['Analysis'].astype(CategoricalDtype(categories=args.order, ordered=True))\n\n fig, axes = plt.subplots(1, len(args.folders), sharey=True, figsize=(args.width,args.height))\n if len(args.folders) == 1:\n plots = [(axes, df[0], None)]\n elif args.titles:\n assert len(args.titles) == len(args.folders)\n plots = zip(axes, df, args.titles)\n else:\n plots = zip(axes, df, [None for x in axes])\n\n first = True\n for ax, df, title in plots:\n sns.boxplot(y='Analysis', x='Time', data=df.mask(df['Result']=='TIMEOUT'),\n hue='Result', ax=ax,\n linewidth=1,\n fliersize=1,\n width=0.7)\n\n ax.get_legend().remove()\n\n if first:\n ax.set_ylabel(args.ylabel)\n first = False\n else:\n ax.set_ylabel(None)\n\n ax.set_xlabel(args.xlabel)\n\n if title is not None:\n ax.set_title(title)\n\n ax.legend(loc='upper left', bbox_to_anchor=(1.05, 1))\n\n\n sns.despine(left=True)\n\n if 
args.output:\n plt.savefig(args.output, bbox_inches='tight')\n else:\n plt.show()\n", "sub_path": "experiments/TUBS21/generated_timebox.py", "file_name": "generated_timebox.py", "file_ext": "py", "file_size_in_byte": 2765, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.api.types.CategoricalDtype", "line_number": 40, "usage_type": "call"}, {"api_name": "eval.generated.SchedulabilityData", "line_number": 44, "usage_type": "call"}, {"api_name": "eval.generated", "line_number": 44, "usage_type": "name"}, {"api_name": "pandas.api.types.CategoricalDtype", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 62, "usage_type": "call"}, {"api_name": "seaborn.despine", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "637340842", "text": "from django import forms\nfrom .models import RateHistory\n\n\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\n\nclass RateSavingForm(forms.ModelForm):\n ua_rate = forms.DecimalField(max_digits=8, decimal_places=2, min_value=0.01) # Add validations to rate form field\n\n class Meta:\n model = RateHistory\n fields = ['ua_rate', 'date'] # Fields to use in form\n widgets = {\n 'ua_rate': forms.NumberInput(attrs={'class': 'mb-3 d-inline'}), # Add attributes to rate field\n 'date': DateInput()\n }\n\n def clean(self):\n super(RateSavingForm, self).clean()\n\n date = self.cleaned_data['date']\n if date.year < 1900:\n self.add_error('date', 'Date must be on or after 1900-01-01')\n return self.cleaned_data\n\n", "sub_path": "chart_rate/main/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 821, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.forms.DateInput", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.DecimalField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "models.RateHistory", "line_number": 13, "usage_type": "name"}, {"api_name": "django.forms.NumberInput", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "148377501", "text": "'''\nBased on:\n\nhttps://gist.github.com/neilslater/40201a6c63b4462e6c6e458bab60d0b4\n\n\nRemaining: 1) Get std dev on predictions\n 2) Deeper, wider networks ? 
- right now, loss function seems converging - but testing is bad\n 3) Use sklearn, pipeline etc\n 4) Use callbacks\n 5) Plot conditional prob distribution kinda thing\n 6) What metric to use to check accuracy of all testimages?\n\n'''\n\n# -*- coding: utf-8 -*-\nimport numpy as np\nfrom keras import backend as K\nK.set_image_dim_ordering('tf')\nfrom keras.models import load_model\nnp.set_printoptions(precision=3)\nnp.set_printoptions(suppress=True)\nimport glob\nimport time\ntime_i = time.time()\n\n\n\n\nDir1 = '/home/nes/Desktop/ConvNetData/lens/AllTrainTestSets/JPG/'\nDir2 = ['single/', 'stack/'][1]\nDir3 = ['0/', '1/'][1]\ndata_path = Dir1 + Dir2 + Dir3 + 'TestData/'\nnames = ['lensed', 'unlensed']\ndata_dir_list = ['lensed_outputs', 'unlensed_outputs']\n\nimage_size = img_rows = 45\nimg_cols = 45\nnum_channel = 1\n# num_epoch = 10\n# batch_size = 8\n\nnum_classes = 1\nnum_files = 2000\n# num_para = 5\n\n# num_samples = 1999\n# cv_size = 2000\n\nnum_epoch = 100\nbatch_size = 16\nlearning_rate = 1e-4 # Warning: lr and decay vary across optimizers\ndecay_rate = 0.0\nopti_id = 1 # [SGD, Adadelta, RMSprop]\nloss_id = 0 # [mse, mae] # mse is always better\npara_row = 3\n\n\nnum_epoch = 100\nbatch_size = 16\nlearning_rate = 1e-3 # Warning: lr and decay vary across optimizers\ndecay_rate = 0.0\nopti_id = 1 # [SGD, Adadelta, RMSprop]\nloss_id = 0 # [mse, mae] # mse is always better\npara_row = 4\n\n\n\n# num_epoch = 10\n# batch_size = 16\n# learning_rate = .001 # Warning: lr and decay vary across optimizers\n# decay_rate = 0.01\n# opti_id = 1 # [SGD, Adadelta, RMSprop]\n# loss_id = 0 # [mse, mae] # mse is always better\nprint(para_row)\nprint('vel-dispersion ellipticity orientation z magnification')\n\n\n\nDirIn = '/home/nes/Dropbox/Argonne/lensData/ModelOutRegression/'\n\nfileOut = 'RegressionStackNew_opti' + str(opti_id) + '_loss' + str(loss_id) + '_lr' + str(\n learning_rate) + '_decay' + str(decay_rate) + '_batch' + str(batch_size) + '_epoch' + str(\n num_epoch)\n\n\n# hyperpara = 'Deeper*300*'\nhyperpara = 'RegressionStackNew*'\n\n\nfilelist = sorted(glob.glob(DirIn + hyperpara + '*.hdf5'))\n# filelist = sorted(glob.glob(DirIn +'*.npy')) # All\nhistlist = sorted(glob.glob(DirIn + hyperpara + '*.npy'))\n\nprint(len(filelist))\nprint(*filelist, sep='\\n')\n\n# for i in range(len(filelist)):\nfor i in range(1):\n\n # fileIn = filelist[i]\n fileIn = DirIn + fileOut + '.hdf5'\n\n # histIn = histlist[i]\n histIn = DirIn + fileOut + '.npy'\n\n\n loaded_model = load_model(fileIn)\n print(fileIn)\n history = np.load(histIn)\n print(histIn)\n\n\n\ndef load_test():\n img_data_list = []\n # labels = []\n\n # for name in names:\n for labelID in [0, 1]:\n name = names[labelID]\n for img_ind in range( int(num_files / num_classes) ):\n\n input_img = np.load(data_path + name + str(img_ind) + '.npy')\n if np.isnan(input_img).any():\n print(labelID, img_ind, ' -- ERROR: NaN')\n else:\n\n img_data_list.append(input_img)\n # labels.append([labelID, 0.5*labelID, 0.33*labelID, 0.7*labelID, 5.0*labelID] )\n\n img_data = np.array(img_data_list)\n img_data = img_data.astype('float32')\n # labels = np.array(labels)\n # labels = labels.astype('float32')\n\n img_data /= 255\n print (img_data.shape)\n\n if num_channel == 1:\n if K.image_dim_ordering() == 'th':\n img_data = np.expand_dims(img_data, axis=1)\n print (img_data.shape)\n else:\n img_data = np.expand_dims(img_data, axis=4)\n print (img_data.shape)\n\n else:\n if K.image_dim_ordering() == 'th':\n img_data = np.rollaxis(img_data, 3, 1)\n print 
(img_data.shape)\n\n X_test = img_data\n # y_train = np_utils.to_categorical(labels, num_classes)\n labels = np.load(Dir1 + Dir2 + Dir3 + 'Test5para.npy')\n print(labels.shape)\n\n # para5 = labels[:,para_row + 2]\n para5 = labels[:,2:]\n np.random.seed(12345)\n shuffleOrder = np.arange(X_test.shape[0])\n np.random.shuffle(shuffleOrder)\n X_test = X_test[shuffleOrder]\n y_test = para5[shuffleOrder]\n # y_train = labels1[shuffleOrder]\n\n # print y_train[0:10]\n # print y_train[0:10]\n\n return X_test, y_test\n\ndef read_and_normalize_test_data():\n test_data, test_target = load_test()\n test_data = np.array(test_data, dtype=np.float32)\n test_target = np.array(test_target, dtype=np.float32)\n m = test_data.mean()\n s = test_data.std()\n\n print ('Test mean, sd:', m, s )\n test_data -= m\n test_data /= s\n print('Test shape:', test_data.shape)\n print(test_data.shape[0], 'test samples')\n return test_data, test_target\n\n\nrescaleMin, rescaleMax = np.load(Dir1 + Dir2 + Dir3 + 'RescalingMinMax5para.npy')\nprint(rescaleMin.shape)\n\ntest_data, test_target = read_and_normalize_test_data()\ntest_data = test_data[0:num_files,:,:,:]\ntest_target = test_target[0:num_files]\n\n\n########## Predictions ######################\n\nprint('vel-dispersion ellipticity orientation z magnification')\n\npredictions = np.zeros_like(test_target)\n\nfor i in range(num_files):\n test_img = np.expand_dims(test_data[i], axis=0)\n predictions[i] = loaded_model.predict(test_img, batch_size= 1, verbose=0)[0]\n\n\n######### Check #####################\n\nfor i in range(num_files):\n\n print('true: ', rescaleMin + (rescaleMax - rescaleMin)*test_target[i])\n print('pred: ', rescaleMin + (rescaleMax - rescaleMin)*np.array(predictions[i]))\n print(30*'-')\n\n\n\n\n\nimport matplotlib.pylab as plt\n\n# plt.figure(10)\nfig, ax = plt.subplots(2, 3, figsize=(10, 6))\nfig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)\n\n\n# ax[0, 0].plot( y_train, predictions,\n# 'kx', alpha = 0.1, label = 'rescaled vel-dispersion')\n\n\nax[0, 0].plot( test_target[:, 0], predictions[:, 0], 'kx', alpha = 0.05,\n label = 'rescaled vel-dispersion')\nax[0, 1].plot( test_target[:, 1], predictions[:, 1], 'kx', alpha = 0.05,\n label = 'rescaled ellipticity')\nax[0, 2].plot( test_target[:, 2], predictions[:, 2], 'kx', alpha = 0.05,\n label = 'rescaled orientation')\nax[1, 0].plot( test_target[:, 3], predictions[:, 3], 'kx', alpha = 0.05,\n label = 'rescaled redshift')\nax[1, 1].plot( test_target[:, 4], predictions[:, 4], 'kx', alpha = 0.05,\n label = 'rescaled magnification')\n\n\n\nax[0, 0].plot( [0, 1], [0, 1], 'r')\nax[0, 1].plot( [0, 1], [0, 1], 'r')\nax[0, 2].plot( [0, 1], [0, 1], 'r')\nax[1, 0].plot( [0, 1], [0, 1], 'r')\nax[1, 1].plot( [0, 1], [0, 1], 'r')\n\nax[0, 0].set_xlabel('true')\nax[0, 0].set_ylabel('pred')\nax[0, 1].set_xlabel('true')\nax[0, 1].set_ylabel('pred')\nax[0, 2].set_xlabel('true')\nax[0, 2].set_ylabel('pred')\nax[1, 0].set_xlabel('true')\nax[1, 0].set_ylabel('pred')\nax[1, 1].set_xlabel('true')\nax[1, 1].set_ylabel('pred')\n\nax[0, 0].axis('equal')\nax[0, 1].axis('equal')\nax[0, 2].axis('equal')\nax[1, 0].axis('equal')\nax[1, 1].axis('equal')\n\nax[0, 0].set_title('rescaled vel-dispersion')\nax[0, 1].set_title('rescaled ellipticity')\nax[0, 2].set_title('rescaled orientation')\nax[1, 0].set_title('rescaled redshift')\nax[1, 1].set_title( 'rescaled magnification')\n\nax[1, 2].set_visible(False)\n\nplt.show()\n\n\n#####################################\n\n\n\n\n\nCheck_model = False\nif 
Check_model:\n loaded_model.summary()\n loaded_model.get_config()\n loaded_model.layers[0].get_config()\n loaded_model.layers[0].input_shape\n loaded_model.layers[0].output_shape\n loaded_model.layers[0].get_weights()\n np.shape(loaded_model.layers[0].get_weights()[0])\n loaded_model.layers[0].trainable\n\n from keras.utils.vis_utils import plot_model\n plot_model(loaded_model, to_file='model_100runs_test.png', show_shapes=True)\n\n\nplotLossAcc = True\nif plotLossAcc:\n import matplotlib.pylab as plt\n\n # history = np.load('/home/nes/Dropbox/Argonne/lensData/ModelOutRegression/RegressionStack_opti1_loss0_lr0.0001_decay0.0_batch16_epoch200.npy')\n\n epochs = history[0,:]\n train_loss = history[1,:]\n val_loss = history[2,:]\n # train_loss= ModelFit.history['loss']\n # val_loss= ModelFit.history['val_loss']\n # train_acc= ModelFit.history['acc']\n # val_acc= ModelFit.history['val_acc']\n # train_loss\n # epochs= range(1, num_epoch+1)\n\n\n fig, ax = plt.subplots(1,1, sharex= True, figsize = (7,5))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace= 0.02)\n ax.plot(epochs,train_loss)\n ax.plot(epochs,val_loss)\n ax.set_ylabel('loss')\n # ax[0].set_ylim([0,1])\n # ax[0].set_title('Loss')\n ax.legend(['train_loss','val_loss'])\n\n # accuracy doesn't make sense for regression\n\n plt.show()\n\n\ntime_j = time.time()\nprint(time_j - time_i, 'seconds')\nprint( (time_j - time_i)/num_files, 'seconds per image' )\n\n\n\n\n\n#\n#\n#\n#\n# from keras.wrappers.scikit_learn import KerasClassifier\n# from keras.utils import np_utils\n# from sklearn.model_selection import cross_val_score\n# from sklearn.model_selection import KFold\n# from sklearn.preprocessing import LabelEncoder\n# from sklearn.pipeline import Pipeline\n#\n#\n# estimator = KerasClassifier(build_fn=create_model, epochs=20, batch_size= 16, verbose=1)\n# kfold = KFold(n_splits=10, shuffle=True, random_state=4)\n# results = cross_val_score(estimator, test_data, test_target, cv=kfold)\n# print(\"Baseline: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n", "sub_path": "regression_lens5ParaTest2.py", "file_name": "regression_lens5ParaTest2.py", "file_ext": "py", "file_size_in_byte": 9577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "keras.backend.set_image_dim_ordering", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.set_printoptions", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 22, "usage_type": "call"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 91, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 132, "usage_type": "call"}, {"api_name": "keras.backend.image_dim_ordering", "line_number": 141, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 141, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 145, "usage_type": "call"}, 
{"api_name": "keras.backend.image_dim_ordering", "line_number": 149, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 149, "usage_type": "name"}, {"api_name": "numpy.rollaxis", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 162, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 174, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 175, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pylab.subplots", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 221, "usage_type": "name"}, {"api_name": "matplotlib.pylab.show", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 273, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 290, "usage_type": "call"}, {"api_name": "keras.utils.vis_utils.plot_model", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pylab.subplots", "line_number": 314, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 314, "usage_type": "name"}, {"api_name": "matplotlib.pylab.show", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 325, "usage_type": "name"}, {"api_name": "time.time", "line_number": 328, "usage_type": "call"}]} +{"seq_id": "531193187", "text": "# Class for handling everything.json\n# The 'game' variable may be passed in by the user at runtime.\n# This allows an engine to load a specific game's everything.json\n\nimport json\n\nclass Content(object):\n\n\tdef __init__(self, game=None):\n\t\tr = open(\"engine.json\",\"r\").read()\n\t\tself.engine=json.loads(r)\n\t\tif game==None:\n\t\t\tdefault_path = \"games/default/everything.json\"\n\t\t\tf = open(default_path,\"r\").read()\n\t\t\tdata=json.loads(f)\n\t\t\tself.data = data\n\t\t\tself.path = default_path\n\t\telse:\n\t\t\tgame_path = \"games/\"+game+\"/everything.json\"\n\t\t\tf = open(game_path,\"r\").read()\n\t\t\tdata=json.loads(f)\n\t\t\tself.data = data\n\t\t\tself.path = game_path\n", "sub_path": "otrera/engines/default/content.py", "file_name": "content.py", "file_ext": "py", "file_size_in_byte": 632, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "json.loads", "line_number": 11, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "523463101", "text": "#!/usr/bin/env python\n# -- coding:utf-8 --\nimport hashlib\nimport random\nimport sys\nimport uuid\nimport networkx as nx\nimport json\nimport time\nfrom datetime import 
datetime\nimport multiprocessing as mp\nimport logging\nfrom config import BASE_SPARK, BASE_FILE_PATH\nfrom pandas import Series\nimport os\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n\n\ndef get_date():\n return datetime.now().strftime('%Y%m%d %H:%M:%S')\n\n\nclass Graph(object):\n def __init__(self, edges):\n self.edges_num = len(edges)\n self.graph = self.generate_graph(edges)\n self.all_nodes = []\n self.__all_cycles = []\n self.__cycle_pairs = []\n self.__actual_nodes = []\n\n @staticmethod\n def generate_graph(edges):\n \"\"\"\n Generate graph\n :param edges:\n :return:\n \"\"\"\n try:\n edges = [(x['link'][0], x['link'][1], {\n 'attrs': x['attrs']\n }) for x in edges]\n graph = nx.DiGraph(edges)\n return graph\n except nx.NetworkXException:\n return False\n\n @staticmethod\n def all_nodes_with_attrs(nodes):\n \"\"\"\n Get all node with attributes\n :return:\n \"\"\"\n all_nodes = []\n for node in nodes:\n node_attr = NODE.get_node(node)\n all_nodes.append(node_attr)\n return all_nodes\n\n @staticmethod\n def links_to_pairs(links):\n pairs_dict = {}\n for i in range(len(links)):\n temp_list = []\n for link in links[i]:\n for j in range(len(link) - 1):\n _temp = [link[j], link[j + 1]]\n if _temp not in temp_list:\n temp_list.append(_temp)\n pairs_dict[links.index[i]] = temp_list\n return Series(pairs_dict)\n\n def get_instance_id(self):\n return self.__instance_id\n\n def __set_instance_id(self, ins_id):\n self.__instance_id = ins_id\n\n def set_actual_nodes(self, nodes):\n self.__actual_nodes = nodes\n\n def set_all_nodes(self):\n \"\"\"\n Set all nodes in Graph instance\n :return:\n \"\"\"\n try:\n all_nodes = [n for n in self.graph.nodes()]\n self.all_nodes = all_nodes\n ins_id = ''.join(['[graph_', str(self.edges_num), '_', str(len(all_nodes)), ']'])\n self.__set_instance_id(ins_id)\n logging.info('===Graph %s got all nodes at %s and all nodes length is %s===' %\n (ins_id, get_date(), len(all_nodes)))\n except nx.NetworkXException:\n self.all_nodes = []\n\n def set_all_cycles(self):\n \"\"\"\n Set all cycles in Graph instance\n :return:\n \"\"\"\n try:\n all_cycles = nx.simple_cycles(self.graph)\n cycles = [cycle for cycle in all_cycles]\n logging.info('===Graph %s got all cycles at %s and all cycles length is %s===' %\n (self.get_instance_id(), get_date(), len(cycles)))\n self.__all_cycles = cycles\n except (nx.NetworkXException, nx.NetworkXNoCycle):\n self.__all_cycles = []\n\n def set_all_cycle_pairs(self):\n \"\"\"\n Set all cycle pairs in Graph instance\n :return:\n \"\"\"\n cycle_pairs = []\n for cycle in self.__all_cycles:\n for c in range(len(cycle)):\n t = []\n if c == len(cycle) - 1:\n t.append(cycle[c])\n t.append(cycle[0])\n else:\n t.append(cycle[c])\n t.append(cycle[c + 1])\n cycle_pairs.append(''.join(t))\n logging.info('===Graph %s got all cycle pairs at %s and cycle paris length is %s and '\n 'cycle paris distinct length is %s===' %\n (self.get_instance_id(), get_date(), len(cycle_pairs), len(list(set(cycle_pairs)))))\n self.__cycle_pairs = list(set(cycle_pairs))\n\n def all_links(self):\n \"\"\"\n All links in graph\n :return:\n \"\"\"\n try:\n graph = self.graph\n all_pairs_links = nx.all_pairs_shortest_path(graph)\n all_links = [path[1][key] for path in all_pairs_links for key in path[1] if len(path[1][key]) == 2]\n actual_nodes = self.all_nodes\n self.set_actual_nodes(actual_nodes)\n cycle_pairs = self.__cycle_pairs\n links = 
[link for link in all_links if (''.join(link)) not in cycle_pairs]\n link_list = [dict({'source_id': str(link[0]),\n 'target_id': str(link[1])}.items() + graph[link[0]][link[1]].items()) for link\n in links]\n _link_result = {'paths': link_list, 'type': 'link', 'circle_id': None}\n return _link_result\n except (nx.NetworkXException, nx.NetworkXNoPath):\n return []\n\n def cutoff_links(self, cutoff):\n \"\"\"\n cutoff level of path\n :param cutoff:\n :return:\n \"\"\"\n try:\n graph = self.graph\n all_links = []\n pairs_links = nx.all_pairs_shortest_path(graph, cutoff)\n for link in pairs_links:\n all_links.append(link)\n\n ret_links = {}\n ret_nodes = {}\n # 1. Find the path starting from target which path's length <= cutoff\n for links in all_links:\n _id = links[0]\n _links = links[1]\n temp = []\n for k in _links:\n if len(_links[k]) > 1:\n temp.append(_links[k])\n ret_links[_id] = temp\n\n # Turn to pandas for fast key-value search\n pd_ret_links = Series(ret_links)\n\n # 2. Find the path ending at target which path's length <= cutoff\n for links in all_links:\n _links = links[1]\n for k in _links:\n if len(_links[k]) > 1:\n pd_ret_links[k].append(_links[k])\n\n # 3. Find the target's related nodes\n for i in range(len(pd_ret_links)):\n node_list = []\n for links in pd_ret_links[i]:\n for node_id in links:\n node_list.append(node_id)\n ret_nodes[pd_ret_links.index[i]] = list(set(node_list))\n # Turn to pandas for fast key-value search\n pd_ret_nodes = Series(ret_nodes)\n\n # 4. Filter path and get related cycles\n results = []\n _links = self.links_to_pairs(pd_ret_links)\n logging.info('===Graph %s got cutoff links at %s and length is %s===' %\n (self.get_instance_id(), get_date(), len(_links)))\n cycle_pairs = self.__cycle_pairs\n for i in range(len(_links)):\n link = [links for links in _links[i] if (''.join(links)) not in cycle_pairs]\n\n lk_list = [dict({'source_id': str(lk[0]),\n 'target_id': str(lk[1])}.items() + graph[lk[0]][lk[1]].items()) for lk\n in link]\n actual_nodes = pd_ret_nodes[i]\n self.set_actual_nodes(actual_nodes)\n _result = self.all_cycles(cutoff)\n cy_list = _result['cycle_result']\n act_nodes = _result['actual_nodes']\n\n if len(lk_list):\n _link_result = {'paths': lk_list, 'type': 'link', 'circle_id': None}\n cy_list.append(_link_result)\n nd_list = self.all_nodes_with_attrs(act_nodes)\n node_id = pd_ret_nodes.index[i]\n node_eid = NODE.get_node_eid(node_id)\n union_id = create_union_id()\n if node_eid:\n temp = {\n \"id\": union_id,\n \"value\": json.dumps({\"nodes\": nd_list, \"links\": cy_list})\n }\n results.append({\"eid\": node_eid, \"union_id\": union_id, \"value\": json.dumps(temp)})\n return results\n except (nx.NetworkXException, nx.NetworkXNoPath):\n return []\n\n def all_cycles(self, cutoff=0):\n \"\"\"\n All cycle in graph\n :param cutoff:\n :return:\n \"\"\"\n try:\n graph = self.graph\n _related_cycle_nodes = []\n if cutoff:\n actual_cycles = []\n for cycles in self.__all_cycles:\n for node in self.__actual_nodes:\n if node in cycles:\n if len(cycles) <= 20:\n cutoff_cycles = cycles\n else:\n node_idx = cycles.index(node)\n pre_idx = node_idx - 10\n sur_idx = node_idx + 10\n pre_len = pre_idx if pre_idx > -1 else 0\n sur_len = sur_idx if sur_idx < (len(cycles) - 1) else (len(cycles) - 1)\n cutoff_cycles = cycles[pre_len:sur_len + 1]\n if cutoff_cycles not in actual_cycles:\n actual_cycles.append(cutoff_cycles)\n for cycles in actual_cycles:\n for n in cycles:\n if n not in self.__actual_nodes:\n _related_cycle_nodes.append(n)\n else:\n 
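The cutoff_links method above is driven by nx.all_pairs_shortest_path with a depth cutoff; a toy illustration of what that call yields (in networkx 2.x it returns an iterator of (source, paths) pairs, which is why the script indexes path[1]):

import networkx as nx

g = nx.path_graph(5, create_using=nx.DiGraph)   # 0 -> 1 -> 2 -> 3 -> 4
paths = dict(nx.all_pairs_shortest_path(g, cutoff=2))
# Per source node, only paths of at most `cutoff` edges survive:
print(paths[0])  # {0: [0], 1: [0, 1], 2: [0, 1, 2]}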
actual_cycles = self.__all_cycles\n\n cycle_list = []\n for cycle in actual_cycles:\n temp_list = []\n if len(cycle) == 2:\n source_id = cycle[0]\n target_id = cycle[1]\n if graph.has_edge(source_id, target_id):\n temp = {'source_id': str(source_id),\n 'target_id': str(target_id)}\n temp_list.append(dict(temp.items() + graph[source_id][target_id].items()))\n source_id = cycle[1]\n target_id = cycle[0]\n if graph.has_edge(source_id, target_id):\n temp = {'source_id': str(source_id),\n 'target_id': str(target_id)}\n temp_list.append(dict(temp.items() + graph[source_id][target_id].items()))\n\n cycle_list.append(temp_list)\n else:\n for i in range(len(cycle)):\n if i == (len(cycle) - 1):\n source_id = cycle[i]\n target_id = cycle[0]\n if graph.has_edge(source_id, target_id):\n temp = {'source_id': str(source_id),\n 'target_id': str(target_id)}\n temp_list.append(dict(temp.items() + graph[source_id][target_id].items()))\n else:\n source_id = cycle[i]\n target_id = cycle[i + 1]\n if graph.has_edge(source_id, target_id):\n temp = {'source_id': str(source_id),\n 'target_id': str(target_id)}\n temp_list.append(dict(temp.items() + graph[source_id][target_id].items()))\n\n cycle_list.append(temp_list)\n\n cycle_result = [{\n 'paths': cycle,\n 'type': 'circle' if len(cycle) > 2 else 'each_other' if len(cycle) == 2 else 'self',\n 'circle_id': create_union_id()\n } for cycle in cycle_list]\n\n return {'cycle_result': cycle_result, 'actual_nodes': self.__actual_nodes + _related_cycle_nodes}\n except nx.NetworkXException:\n return []\n\n\ndef create_union_id():\n t = ''.join([create_date(), str(uuid.uuid1())])\n m = hashlib.md5()\n m.update(bytes(str(t)))\n return m.hexdigest()\n\n\ndef create_date():\n a1 = (1900, 1, 1, 0, 0, 0, 0, 0, 0)\n a2 = (3000, 12, 31, 23, 59, 59, 0, 0, 0)\n\n start = time.mktime(a1)\n end = time.mktime(a2)\n\n t = random.randint(start, end)\n date_tuple = time.localtime(t)\n date = time.strftime(\"%Y-%m-%d %H:%m:%s\", date_tuple)\n return date\n\n\ndef worker(queue_sg, queue_lg, edges):\n \"\"\"\n Queue's producer.\n Generate graph and calculate graph's basic elements includes nodes, cycles and so on.\n Put graph instance to the queue.\n :param queue_sg: Queue\n :param queue_lg: Queue\n :param edges: Graph's edges with attributes\n :return: Class Graph's instance\n \"\"\"\n graph = Graph(edges)\n if len(edges) >= 500:\n queue_lg.put(graph)\n else:\n queue_sg.put(graph)\n del graph\n return\n\n\ndef listener_small_graph(queue, q_write):\n \"\"\"\n Queue's consumer for complete graph calculation.\n Get graph instance for further process includes calculating links, related cycles and nodes.\n Write result to file.\n :param queue: Calculating queue\n :param q_write: Writing queue\n :return:\n \"\"\"\n while True:\n graph = queue.get()\n if graph == 'end':\n break\n graph.set_all_nodes()\n graph.set_all_cycles()\n graph.set_all_cycle_pairs()\n\n all_nodes = graph.all_nodes\n\n link_result = graph.all_links()\n cycle_result = graph.all_cycles()['cycle_result']\n node_result = graph.all_nodes_with_attrs(all_nodes)\n logging.info('Graph %s received complete result at %s and node length is %s'\n ' and path length is %s and cycle length is %s' %\n (graph.get_instance_id(), get_date(), len(node_result),\n len(link_result['paths']), len(cycle_result)))\n\n if len(link_result['paths']):\n cycle_result.append(link_result)\n\n value = json.dumps({\"nodes\": node_result, \"links\": cycle_result})\n union_id = create_union_id()\n result = json.dumps({\"id\": union_id, \"value\": value})\n 
q_write.put(json.dumps({\"value\": result}))\n\n for node in all_nodes:\n eid = NODE.get_node_eid(node)\n if eid:\n result = json.dumps({\"union_id\": union_id, \"eid\": eid})\n q_write.put(json.dumps({\"mapping\": result}))\n return\n\n\ndef listener_large_graph(queue, q_write, cutoff):\n \"\"\"\n Queue's consumer for cutoff graph calculation.\n Get graph instance for further process includes calculating links, related cycles and nodes.\n Write result to file.\n :param queue: Calculating queue\n :param q_write: Writing queue\n :param cutoff: Cutoff\n :return:\n \"\"\"\n cutoff = int(cutoff) or 10\n while True:\n graph = queue.get()\n if graph == 'end':\n break\n graph.set_all_cycles()\n results = graph.cutoff_links(cutoff)\n logging.info('===Graph %s received cutoff results and results length is %s===' %\n (graph.get_instance_id(), len(results)))\n for result in results:\n eid = result['eid']\n union_id = result['union_id']\n value = result['value']\n q_write.put(json.dumps({\"value\": value}))\n q_write.put(json.dumps({\"mapping\": json.dumps({\"union_id\": union_id, \"eid\": eid})}))\n return\n\n\ndef customer(queue, lock, f_rel, f_eid):\n while True:\n data = queue.get()\n obj = json.loads(data)\n if 'value' in obj:\n lock.acquire()\n f_rel.write(obj['value'] + '\\n')\n f_rel.flush()\n lock.release()\n if 'mapping' in obj:\n lock.acquire()\n f_eid.write(obj['mapping'] + '\\n')\n f_eid.flush()\n lock.release()\n\n\ndef merge_file():\n sub_dir_path = BASE_FILE_PATH.get_sub_dir_path()\n input_graph_file_path = BASE_FILE_PATH.get_input_graph_path()\n input_nodes_file_path = BASE_FILE_PATH.get_input_nodes_file_path()\n grnt_rel_path = BASE_FILE_PATH.get_output_rel_path()\n\n # os.system('''\n # if [ ! -d \"{0}\" ]; then\n # mkdir {0}\n # else\n # rm {0}*\n # fi\n # '''.format(sub_dir_path))\n\n os.system('''\n if [ ! 
-d \"{0}\" ]; then\n mkdir {0}\n fi\n '''.format(sub_dir_path))\n\n if not os.path.exists(input_graph_file_path):\n os.system('touch %s' % input_graph_file_path)\n os.system('hdfs dfs -getmerge %s %s' % (BASE_SPARK.get_hdfs_graph_file_path(), input_graph_file_path))\n\n if not os.path.exists(input_nodes_file_path):\n os.system('hdfs dfs -getmerge %s %s' % (BASE_SPARK.get_hdfs_nodes_file_path(), input_nodes_file_path))\n\n if not os.path.exists(grnt_rel_path):\n os.system('touch %s' % grnt_rel_path)\n\n local_graph_size = int(os.popen('ls -la {0} | cut -d \" \" -f 5'.format(input_graph_file_path)).readlines()[0])\n hdfs_graph_size = int(\n os.popen('hdfs dfs -du -s {0} | cut -d \" \" -f 1'.format(BASE_SPARK.get_hdfs_graph_file_path())).readlines()[0])\n\n local_nodes_size = int(os.popen('ls -la {0} | cut -d \" \" -f 5'.format(input_nodes_file_path)).readlines()[0])\n hdfs_nodes_size = int(\n os.popen('hdfs dfs -du -s {0} | cut -d \" \" -f 1'.format(BASE_SPARK.get_hdfs_nodes_file_path())).readlines()[0])\n\n if local_graph_size != hdfs_graph_size:\n return False\n\n if local_nodes_size != hdfs_nodes_size:\n return False\n\n return True\n\n\ndef main(rel_path, eid_mapping_path, cutoff=10):\n manager = mp.Manager()\n queue_sg = manager.Queue()\n queue_lg = manager.Queue()\n queue_write = manager.Queue()\n lock = manager.Lock()\n pool = mp.Pool(mp.cpu_count() + 2)\n\n f_rel = open(rel_path, 'wb')\n f_eid = open(eid_mapping_path, 'wb')\n\n pool.apply_async(listener_small_graph, (queue_sg, queue_write,))\n pool.apply_async(listener_large_graph, (queue_lg, queue_write, cutoff,))\n writer = mp.Process(target=customer, args=(queue_write, lock, f_rel, f_eid,))\n\n jobs = []\n input_file = BASE_FILE_PATH.get_input_graph_path()\n with open(input_file, 'r') as f:\n for line in f.readlines():\n line_json = json.loads(line.strip())\n edges = line_json['links']\n job = pool.apply_async(worker, (queue_sg, queue_lg, edges,))\n jobs.append(job)\n\n for job in jobs:\n job.get()\n\n writer.start()\n\n queue_sg.put('end')\n queue_lg.put('end')\n pool.close()\n pool.join()\n\n while True:\n if not queue_write.qsize():\n writer.terminate()\n writer.join()\n break\n\n f_rel.close()\n f_eid.close()\n\n\nif __name__ == '__main__':\n logging.info('=====Processing start at %s!!!=====' % get_date())\n\n stat = merge_file()\n if not stat:\n logging.info('Get file from HDFS error!')\n sys.exit(1)\n\n from utils import NODE\n\n level = int(sys.argv[1]) or 10\n output_rel_path = BASE_FILE_PATH.get_output_rel_path()\n output_eid_mapping_path = BASE_FILE_PATH.get_output_eid_mapping_path()\n\n main(output_rel_path, output_eid_mapping_path, level)\n\n tmp = os.popen('hdfs dfs -stat %s' % BASE_SPARK.get_hdfs_rel_json_path()).readlines()\n if len(tmp):\n os.system('hdfs dfs -rm %s' % BASE_SPARK.get_hdfs_rel_json_path())\n os.system('hdfs dfs -put %s %s' % (output_rel_path, BASE_SPARK.get_hdfs_rel_json_path()))\n\n tmp = os.popen('hdfs dfs -stat %s' % BASE_SPARK.get_hdfs_eid_mapping_json_path()).readlines()\n if len(tmp):\n os.system('hdfs dfs -rm %s' % BASE_SPARK.get_hdfs_eid_mapping_json_path())\n os.system('hdfs dfs -put %s %s' % (output_eid_mapping_path, BASE_SPARK.get_hdfs_eid_mapping_json_path()))\n\n logging.info('=====Processing done at %s!!!=====' % get_date())\n", "sub_path": "guarantee_relation/second_phase/grnt_big_graph_queue_all_cycle.py", "file_name": "grnt_big_graph_queue_all_cycle.py", "file_ext": "py", "file_size_in_byte": 20030, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "55", "api": [{"api_name": "sys.setdefaultencoding", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 20, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "name"}, {"api_name": "networkx.DiGraph", "line_number": 48, "usage_type": "call"}, {"api_name": "networkx.NetworkXException", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 76, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 97, "usage_type": "call"}, {"api_name": "networkx.NetworkXException", "line_number": 99, "usage_type": "attribute"}, {"api_name": "networkx.simple_cycles", "line_number": 108, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 110, "usage_type": "call"}, {"api_name": "networkx.NetworkXException", "line_number": 113, "usage_type": "attribute"}, {"api_name": "networkx.NetworkXNoCycle", "line_number": 113, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 132, "usage_type": "call"}, {"api_name": "networkx.all_pairs_shortest_path", "line_number": 144, "usage_type": "call"}, {"api_name": "networkx.NetworkXException", "line_number": 155, "usage_type": "attribute"}, {"api_name": "networkx.NetworkXNoPath", "line_number": 155, "usage_type": "attribute"}, {"api_name": "networkx.all_pairs_shortest_path", "line_number": 167, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 184, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 201, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 206, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 231, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 233, "usage_type": "call"}, {"api_name": "networkx.NetworkXException", "line_number": 235, "usage_type": "attribute"}, {"api_name": "networkx.NetworkXNoPath", "line_number": 235, "usage_type": "attribute"}, {"api_name": "networkx.NetworkXException", "line_number": 314, "usage_type": "attribute"}, {"api_name": "uuid.uuid1", "line_number": 319, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 320, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 329, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 330, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 332, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 333, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 334, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 379, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 387, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 389, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 390, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 395, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 396, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 417, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 423, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 424, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 431, "usage_type": "call"}, {"api_name": "config.BASE_FILE_PATH.get_sub_dir_path", "line_number": 445, "usage_type": "call"}, {"api_name": 
"config.BASE_FILE_PATH", "line_number": 445, "usage_type": "name"}, {"api_name": "config.BASE_FILE_PATH.get_input_graph_path", "line_number": 446, "usage_type": "call"}, {"api_name": "config.BASE_FILE_PATH", "line_number": 446, "usage_type": "name"}, {"api_name": "config.BASE_FILE_PATH.get_input_nodes_file_path", "line_number": 447, "usage_type": "call"}, {"api_name": "config.BASE_FILE_PATH", "line_number": 447, "usage_type": "name"}, {"api_name": "config.BASE_FILE_PATH.get_output_rel_path", "line_number": 448, "usage_type": "call"}, {"api_name": "config.BASE_FILE_PATH", "line_number": 448, "usage_type": "name"}, {"api_name": "os.system", "line_number": 458, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 464, "usage_type": "call"}, {"api_name": "os.path", "line_number": 464, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 465, "usage_type": "call"}, {"api_name": "os.system", "line_number": 466, "usage_type": "call"}, {"api_name": "config.BASE_SPARK.get_hdfs_graph_file_path", "line_number": 466, "usage_type": "call"}, {"api_name": "config.BASE_SPARK", "line_number": 466, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path", "line_number": 468, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 469, "usage_type": "call"}, {"api_name": "config.BASE_SPARK.get_hdfs_nodes_file_path", "line_number": 469, "usage_type": "call"}, {"api_name": "config.BASE_SPARK", "line_number": 469, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 471, "usage_type": "call"}, {"api_name": "os.path", "line_number": 471, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 472, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 474, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 476, "usage_type": "call"}, {"api_name": "config.BASE_SPARK.get_hdfs_graph_file_path", "line_number": 476, "usage_type": "call"}, {"api_name": "config.BASE_SPARK", "line_number": 476, "usage_type": "name"}, {"api_name": "os.popen", "line_number": 478, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 480, "usage_type": "call"}, {"api_name": "config.BASE_SPARK.get_hdfs_nodes_file_path", "line_number": 480, "usage_type": "call"}, {"api_name": "config.BASE_SPARK", "line_number": 480, "usage_type": "name"}, {"api_name": "multiprocessing.Manager", "line_number": 492, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 497, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 497, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 504, "usage_type": "call"}, {"api_name": "config.BASE_FILE_PATH.get_input_graph_path", "line_number": 507, "usage_type": "call"}, {"api_name": "config.BASE_FILE_PATH", "line_number": 507, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 510, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 536, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 540, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 541, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 545, "usage_type": "attribute"}, {"api_name": "config.BASE_FILE_PATH.get_output_rel_path", "line_number": 546, "usage_type": "call"}, {"api_name": "config.BASE_FILE_PATH", "line_number": 546, "usage_type": "name"}, {"api_name": "config.BASE_FILE_PATH.get_output_eid_mapping_path", "line_number": 547, "usage_type": "call"}, 
{"api_name": "config.BASE_FILE_PATH", "line_number": 547, "usage_type": "name"}, {"api_name": "os.popen", "line_number": 551, "usage_type": "call"}, {"api_name": "config.BASE_SPARK.get_hdfs_rel_json_path", "line_number": 551, "usage_type": "call"}, {"api_name": "config.BASE_SPARK", "line_number": 551, "usage_type": "name"}, {"api_name": "os.system", "line_number": 553, "usage_type": "call"}, {"api_name": "config.BASE_SPARK.get_hdfs_rel_json_path", "line_number": 553, "usage_type": "call"}, {"api_name": "config.BASE_SPARK", "line_number": 553, "usage_type": "name"}, {"api_name": "os.system", "line_number": 554, "usage_type": "call"}, {"api_name": "config.BASE_SPARK.get_hdfs_rel_json_path", "line_number": 554, "usage_type": "call"}, {"api_name": "config.BASE_SPARK", "line_number": 554, "usage_type": "name"}, {"api_name": "os.popen", "line_number": 556, "usage_type": "call"}, {"api_name": "config.BASE_SPARK.get_hdfs_eid_mapping_json_path", "line_number": 556, "usage_type": "call"}, {"api_name": "config.BASE_SPARK", "line_number": 556, "usage_type": "name"}, {"api_name": "os.system", "line_number": 558, "usage_type": "call"}, {"api_name": "config.BASE_SPARK.get_hdfs_eid_mapping_json_path", "line_number": 558, "usage_type": "call"}, {"api_name": "config.BASE_SPARK", "line_number": 558, "usage_type": "name"}, {"api_name": "os.system", "line_number": 559, "usage_type": "call"}, {"api_name": "config.BASE_SPARK.get_hdfs_eid_mapping_json_path", "line_number": 559, "usage_type": "call"}, {"api_name": "config.BASE_SPARK", "line_number": 559, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 561, "usage_type": "call"}]} +{"seq_id": "568815286", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import TimeModel\nfrom .forms import TimeForm\n\n# Create your views here.\n\n# LIST TEACHERS\ndef index(request):\n allTeachers = TimeModel.objects.all()\n\n return render(request, 'timeApp/index.html', {'allTeachers': allTeachers})\n\n# DISPLAY ENTRIES OF SELECTED TEACHER\ndef teacherInfo(request, id):\n teacher = get_object_or_404(TimeModel, pk=id)\n newEntry = TimeForm(request.POST or None)\n if newEntry.is_valid():\n newEntry.save()\n return redirect('index')\n context ={\n 'teacher': teacher,\n 'newEntry': newEntry\n }\n return render(request, 'timeApp/info.html', context)\n\n# NEW TEACHER ENTRY\ndef newTeacher(request):\n newEntry = TimeForm(request.POST or None)\n if newEntry.is_valid():\n newEntry.save()\n return redirect('index')\n return render(request, 'timeApp/newTeacher.html', {'newEntry': newEntry})\n\n# EDIT TEACHER\ndef edit(request, id):\n teacher = get_object_or_404(TimeModel, pk=id)\n editForm = TimeForm(request.POST or None, instance= teacher)\n if editForm.is_valid():\n editForm.save()\n return redirect('index')\n\n#DELETE\ndef delete(request, id):\n teacher = get_object_or_404(TimeModel, pk=id)\n if request.method == \"POST\":\n teacher.delete()\n return redirect('index')\n\n return render(request, 'timeApp/delete.html', {'holla': teacher})\n\n# TO SHOW SCHOOLS HOURS\ndef schoolHours(request, id):\n school = TimeModel.objects.filter('school')\n hours = get_object_or_404(school, pk=id)\n context={\n 'hours': hours,\n 'school': school,\n }\n return render(request, 'timeApp/schoolHours.html', context)\n\n\n", "sub_path": "timeProj/timeApp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": 
"models.TimeModel.objects.all", "line_number": 9, "usage_type": "call"}, {"api_name": "models.TimeModel.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "models.TimeModel", "line_number": 9, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 11, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 15, "usage_type": "call"}, {"api_name": "models.TimeModel", "line_number": 15, "usage_type": "argument"}, {"api_name": "forms.TimeForm", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "forms.TimeForm", "line_number": 28, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 36, "usage_type": "call"}, {"api_name": "models.TimeModel", "line_number": 36, "usage_type": "argument"}, {"api_name": "forms.TimeForm", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 44, "usage_type": "call"}, {"api_name": "models.TimeModel", "line_number": 44, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}, {"api_name": "models.TimeModel.objects.filter", "line_number": 53, "usage_type": "call"}, {"api_name": "models.TimeModel.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.TimeModel", "line_number": 53, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 54, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "188496254", "text": "import numpy as np\r\nfrom glob import glob\r\nimport glob\r\nimport os\r\nfrom ResNet3D.ResUNet3D import ResUNet\r\nimport nibabel as nib\r\nfrom keras.models import Model\r\nfrom utils import show_img_multiplex\r\nfrom time import sleep\r\nfrom Resnet50SmoothNet.vis_utils import show_img_multiplex_cutoff\r\nfrom loss import calculate_metrics, calculate_metrics_dice\r\nfrom ResNet3D.patch_utils import get_patch_from_array_around_ranch, fuse_array2complete_matrix\r\nfrom Resnet50SmoothNet.vis_utils import show_line_chart\r\n# from ResNet3D.patch_utils import get_patch_from_array_around_ranch\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\r\n\r\n\r\nclass CustomError(Exception):\r\n def __init__(self, ErrorInfo):\r\n super().__init__(self) # 初始化父类\r\n self.errorinfo=ErrorInfo\r\n\r\n def __str__(self):\r\n return self.errorinfo\r\n\r\n\r\nclass PredictCase:\r\n def __init__(self):\r\n # self.case_path = case_path + \"/*\"\r\n self.patch_shape = [128, 128, 128]\r\n self.case_path = None\r\n # self.save_path = r\"/home/yk/Project/keras/dataset/BraTs19DataSet/test815_1/fuse_test_816\"\r\n # self.save_path = r\"/home/yk/Project/keras/dataset/BraTs19DataSet/brate19_prediction/pre_818_train\"\r\n self.save_path = r\"/home/yk/Project/keras/dataset/BraTs19DataSet/brate19_prediction/pre_821_val\"\r\n self.reference_ata_path = r\"/home/yk/Project/keras/dataset/BraTs19DataSet/BraTs19Mixture_N4_HM_Norm/\" \\\r\n 
r\"Train/BraTS19_2013_11_1/BraTS19_2013_11_1_flair.nii.gz\"\r\n\r\n @staticmethod\r\n def get_model_list(case_path):\r\n model_list = glob.glob(case_path)\r\n # mask_name = None\r\n image_list = []\r\n for idx, name in enumerate(model_list):\r\n # if name.split(\".\")[0].split(\"_\")[-1] != \"seg\":\r\n image_list.append(name)\r\n # elif name.split(\".\")[0].split(\"_\")[-1] == \"seg\":\r\n # mask_name = name\r\n return image_list\r\n\r\n def get_patch_from_array(self, image_array):\r\n \"\"\"\r\n\r\n :param image_array:\r\n :return:\r\n \"\"\"\r\n shape = image_array.shape\r\n patch_shape = self.patch_shape\r\n Center_coordinate = [int(shape[0] / 2), int(shape[1] / 2), int(shape[2] / 2)]\r\n # print(\"Center_coordinate\", Center_coordinate)\r\n\r\n assert (Center_coordinate[0] + int(patch_shape[0] / 2) <= shape[0]), \"Out of size range\"\r\n assert (Center_coordinate[0] - int(patch_shape[0] / 2) >= 0), \"Out of size range\"\r\n\r\n assert (Center_coordinate[0] + int(patch_shape[1] / 2) <= shape[0]), \"Out of size range\"\r\n assert (Center_coordinate[0] - int(patch_shape[1] / 2) >= 0), \"Out of size range\"\r\n\r\n assert (Center_coordinate[0] + int(patch_shape[2] / 2) <= shape[0]), \"Out of size range\"\r\n assert (Center_coordinate[0] - int(patch_shape[2] / 2) >= 0), \"Out of size range\"\r\n\r\n patch_data = image_array[\r\n Center_coordinate[0] - int(patch_shape[0] / 2):Center_coordinate[0] + int(patch_shape[0] / 2),\r\n Center_coordinate[1] - int(patch_shape[1] / 2):Center_coordinate[1] + int(patch_shape[1] / 2),\r\n Center_coordinate[2] - int(patch_shape[2] / 2):Center_coordinate[2] + int(patch_shape[2] / 2)]\r\n return patch_data\r\n\r\n @staticmethod\r\n def fuse_data(data1, data2, data3, data4):\r\n \"\"\"\r\n\r\n :param data1: flair\r\n :param data2: t1\r\n :param data3: t1ce\r\n :param data4: t2\r\n :return:\r\n \"\"\"\r\n fuse_data = []\r\n y1 = data1[..., np.newaxis]\r\n y2 = data2[..., np.newaxis]\r\n y3 = data3[..., np.newaxis]\r\n y4 = data4[..., np.newaxis]\r\n fuse_data.append(np.concatenate((y1, y2, y3, y4), axis=-1))\r\n return np.array(fuse_data)\r\n\r\n @staticmethod\r\n def bin_label(label_data, region_type=\"whole\", all_labels=True):\r\n \"\"\"\r\n\r\n :param label_data:\r\n :param region_type:\r\n :param all_labels:\r\n :return:\r\n \"\"\"\r\n\r\n label_num = [1, 2, 4]\r\n fuse_mask = []\r\n label_data_shape = label_data.shape\r\n assert len(label_data_shape) == 3, \"The shape of label data should be 3d\"\r\n seg_labels = np.zeros((label_data_shape[0], label_data_shape[1], label_data_shape[2], len(label_num)))\r\n whole_mask = np.zeros((label_data_shape[0], label_data_shape[1], label_data_shape[2]), dtype=np.uint8)\r\n core_mask = np.zeros((label_data_shape[0], label_data_shape[1], label_data_shape[2]), dtype=np.uint8)\r\n active_mask = np.zeros((label_data_shape[0], label_data_shape[1], label_data_shape[2]), dtype=np.uint8)\r\n # label_data[:, :, :][label_data[:, :, :] == 4] = 3\r\n try:\r\n for idx in range(len(label_num)):\r\n seg_labels[:, :, :, idx] = (label_data == int(label_num[idx])).astype(int)\r\n whole_mask = seg_labels[:, :, :, 0] + seg_labels[:, :, :, 1] + seg_labels[:, :, :, 2]\r\n fuse_mask.append(whole_mask)\r\n core_mask = seg_labels[:, :, :, 0] + seg_labels[:, :, :, 2]\r\n fuse_mask.append(core_mask)\r\n active_mask = seg_labels[:, :, :, 2]\r\n fuse_mask.append(active_mask)\r\n except Exception as error: # 捕获所有可能发生的异常\r\n print(\"ERROR:\", error)\r\n finally:\r\n pass\r\n if all_labels:\r\n fuse_mask = np.transpose(np.array(fuse_mask), [1, 2, 3, 
0])\r\n # print(\"fuse_mask.shape\", fuse_mask.shape) # fuse_mask.shape (128, 128, 128, 3)\r\n return fuse_mask\r\n\r\n else:\r\n if region_type == \"whole\":\r\n return whole_mask\r\n elif region_type == \"core\":\r\n return core_mask\r\n elif region_type == \"active\":\r\n return active_mask\r\n else:\r\n raise CustomError('Parameter values need to be selected from \"whole\", \"core\" and \"active\"')\r\n\r\n def _get_data(self, model_list, seg_name):\r\n \"\"\"\r\n\r\n :param model_list:\r\n :param seg_name:\r\n :return:\r\n \"\"\"\r\n data = []\r\n for idx, model_name in enumerate(model_list):\r\n data.append(self.get_patch_from_array(nib.load(model_name).get_data()))\r\n fuse_data = self.fuse_data(data[0], data[1], data[2], data[3])\r\n\r\n mask_data = self.bin_label(self.get_patch_from_array(nib.load(seg_name).get_data()))\r\n return fuse_data, mask_data\r\n\r\n def get_patch_x(self, model_list, location=\"00\"):\r\n\r\n data = []\r\n for idx, model_name in enumerate(model_list):\r\n data.append(get_patch_from_array_around_ranch(nib.load(model_name).get_data(), location))\r\n fuse_data = self.fuse_data(data[0], data[1], data[2], data[3])\r\n return fuse_data\r\n\r\n\r\n pass\r\n\r\n @staticmethod\r\n def nii_data_read_nib(data_path, header=True):\r\n \"\"\"\r\n\r\n :param data_path: nii data path\r\n :param header: True or False\r\n :return: 3D array # (H, W, D)\r\n \"\"\"\r\n img = nib.load(data_path)\r\n image_array_data = img.get_data()\r\n if header:\r\n return image_array_data, img.affine, img.header\r\n else:\r\n return image_array_data\r\n\r\n def save_nib(self, img_data, filename):\r\n \"\"\"\r\n\r\n :param img_data:\r\n :param image:\r\n :param filename:\r\n :return:\r\n \"\"\"\r\n _, affine, _ = self.nii_data_read_nib(self.reference_ata_path)\r\n new_image = nib.Nifti1Image(img_data, affine=affine)\r\n nib.save(new_image, filename)\r\n\r\n @staticmethod\r\n def re_array(final_shape, center_array):\r\n \"\"\"\r\n\r\n :param final_shape:\r\n :param center_array:\r\n :return:\r\n \"\"\"\r\n shape = final_shape\r\n patch_shape = [128, 128, 128]\r\n final_array = np.zeros(final_shape)\r\n Center_coordinate = [int(shape[0] / 2), int(shape[1] / 2), int(shape[2] / 2)]\r\n final_array[Center_coordinate[0] - int(patch_shape[0] / 2):Center_coordinate[0] + int(patch_shape[0] / 2),\r\n Center_coordinate[1] - int(patch_shape[1] / 2):Center_coordinate[1] + int(patch_shape[1] / 2),\r\n Center_coordinate[2] - int(patch_shape[2] / 2):Center_coordinate[2] + int(patch_shape[2] / 2)] \\\r\n = center_array\r\n # final_array[0:P_shape[0], shape[1] - P_shape[1]:shape[1], 0:P_shape[2]]=\r\n\r\n return final_array\r\n\r\n @staticmethod\r\n def re_array_x(final_shape, patch_x_array, patch_num):\r\n \"\"\"\r\n\r\n :param final_shape:\r\n :param patch_x_array:\r\n :param patch_num:\r\n :return:\r\n \"\"\"\r\n assert patch_num in [\"00\", \"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"cc\"]\r\n shape = (240, 240, 155) # image shape\r\n P_shape = (128, 128, 128) # patch shape\r\n patch_shape = P_shape\r\n Center_coordinate = [int(shape[0] / 2), int(shape[1] / 2), int(shape[2] / 2)]\r\n # Center_coordinate = (120, 120, 77) # Center coordinate\r\n final_array = np.zeros(final_shape)\r\n\r\n if patch_num == \"00\":\r\n final_array[0:P_shape[0], shape[1] - P_shape[1]:shape[1], 0:P_shape[2]] = patch_x_array\r\n return final_array\r\n elif patch_num == \"01\":\r\n final_array[shape[0] - P_shape[0]:shape[0], shape[1] - P_shape[1]:shape[1], 0:P_shape[2]] = patch_x_array\r\n return final_array\r\n elif 
patch_num == \"02\":\r\n final_array[0:P_shape[0], 0:P_shape[1], 0:P_shape[2]] = patch_x_array\r\n return final_array\r\n elif patch_num == \"03\":\r\n final_array[shape[0] - P_shape[0]:shape[0], 0:P_shape[1], 0:P_shape[2]] = patch_x_array\r\n return final_array\r\n elif patch_num == \"04\":\r\n final_array[0:P_shape[0], shape[1] - P_shape[1]:shape[1], shape[2] - P_shape[2]:shape[2]] = patch_x_array\r\n return final_array\r\n elif patch_num == \"05\":\r\n final_array[shape[0] - P_shape[0]:shape[0], shape[1] - P_shape[1]:shape[1], shape[2] - P_shape[2]:shape[2]]\\\r\n = patch_x_array\r\n return final_array\r\n elif patch_num == \"06\":\r\n final_array[0:P_shape[0], 0:P_shape[1], shape[2] - P_shape[2]:shape[2]] = patch_x_array\r\n return final_array\r\n elif patch_num == \"07\":\r\n final_array[shape[0] - P_shape[0]:shape[0], 0:P_shape[1], shape[2] - P_shape[2]:shape[2]] = patch_x_array\r\n return final_array\r\n elif patch_num == \"cc\":\r\n final_array[Center_coordinate[0] - int(patch_shape[0] / 2):Center_coordinate[0] + int(patch_shape[0] / 2),\r\n Center_coordinate[1] - int(patch_shape[1] / 2):Center_coordinate[1] + int(patch_shape[1] / 2),\r\n Center_coordinate[2] - int(patch_shape[2] / 2):Center_coordinate[2] + int(patch_shape[2] / 2)]\\\r\n = patch_x_array\r\n return final_array\r\n\r\n def processing(self, weights_path, r_path):\r\n \"\"\"\r\n\r\n :param weights_path:\r\n :param r_path:\r\n :return:\r\n \"\"\"\r\n case_path_list = glob.glob(r_path + \"/*\")\r\n num = len(case_path_list)\r\n print(\"case_num\", num)\r\n\r\n input_layer, output = ResUNet()\r\n model = Model(inputs=input_layer, outputs=output)\r\n model.load_weights(weights_path)\r\n\r\n for index, case_name in enumerate(case_path_list):\r\n print(index, case_name)\r\n create_case_path = os.path.join(self.save_path, case_name.split(\"/\")[-1])\r\n print(\"create_case_path\", create_case_path)\r\n case_name = case_name + \"/*\"\r\n one_case_model_ab_path_list = self.get_model_list(case_name)\r\n # print(one_case_model_ab_path_list) # t2, t1, flair, t1ce\r\n # print(one_case_seg_ab_path_list)\r\n data_sets = []\r\n data_sets01 = []\r\n optimal_threshold = 0.55\r\n for index_, patch_num in enumerate([\"00\", \"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"cc\"]):\r\n one_case_fuse_data = \\\r\n self.get_patch_x(one_case_model_ab_path_list,\r\n location=patch_num)\r\n # (1, 128, 128, 128, 4) (128, 128, 128, 3)\r\n # print(one_case_fuse_data.shape, one_case_mask_data.shape)\r\n pre_mask = model.predict(one_case_fuse_data, batch_size=1) # (1, 128, 128, 128, 3)\r\n predict_mask_01 = pre_mask > optimal_threshold\r\n data_sets.append(pre_mask)\r\n data_sets01.append(predict_mask_01)\r\n\r\n fused_predict_wt = fuse_array2complete_matrix(data_sets01[0][0, :, :, :, 0], data_sets01[1][0, :, :, :, 0],\r\n data_sets01[2][0, :, :, :, 0], data_sets01[3][0, :, :, :, 0],\r\n data_sets01[4][0, :, :, :, 0], data_sets01[5][0, :, :, :, 0],\r\n data_sets01[6][0, :, :, :, 0], data_sets01[7][0, :, :, :, 0],\r\n data_sets01[8][0, :, :, :, 0])\r\n\r\n fused_predict_tc = fuse_array2complete_matrix(data_sets01[0][0, :, :, :, 1], data_sets01[1][0, :, :, :, 1],\r\n data_sets01[2][0, :, :, :, 1], data_sets01[3][0, :, :, :, 1],\r\n data_sets01[4][0, :, :, :, 1], data_sets01[5][0, :, :, :, 1],\r\n data_sets01[6][0, :, :, :, 1], data_sets01[7][0, :, :, :, 1],\r\n data_sets01[8][0, :, :, :, 1])\r\n\r\n fused_predict_et = fuse_array2complete_matrix(data_sets01[0][0, :, :, :, 2], data_sets01[1][0, :, :, :, 2],\r\n data_sets01[2][0, :, :, :, 2], 
data_sets01[3][0, :, :, :, 2],\r\n data_sets01[4][0, :, :, :, 2], data_sets01[5][0, :, :, :, 2],\r\n data_sets01[6][0, :, :, :, 2], data_sets01[7][0, :, :, :, 2],\r\n data_sets01[8][0, :, :, :, 2])\r\n\r\n fused_predict = np.zeros_like(fused_predict_wt)\r\n fused_predict[fused_predict_wt == 1] = 2\r\n fused_predict[fused_predict_tc == 1] = 1\r\n fused_predict[fused_predict_et == 1] = 4\r\n\r\n pre_filename_complete = create_case_path + \".nii.gz\" # multi-class label map 1 2 4\r\n pre_whole = create_case_path + \"_unc_whole.nii.gz\" # 1 2 4\r\n pre_core = create_case_path + \"_unc_core.nii.gz\" # 1 4\r\n pre_enhance = create_case_path + \"_unc_enhance.nii.gz\" # 4\r\n\r\n print(\"filename_complete\", pre_filename_complete)\r\n\r\n self.save_nib(fused_predict, pre_filename_complete)\r\n self.save_nib(fused_predict_wt, pre_whole)\r\n self.save_nib(fused_predict_tc, pre_core)\r\n self.save_nib(fused_predict_et, pre_enhance)\r\n # self.save_nib(fused_ground, true_filename_complete)\r\n \"\"\"\r\n 2. {ID}_unc_whole.nii.gz (Uncertainty map associated with whole tumor)\r\n 3. {ID}_unc_core.nii.gz (Uncertainty map associated with tumor core)\r\n 4. {ID}_unc_enhance.nii.gz (Uncertainty map associated with enhancing tumor)\r\n \"\"\"\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # path = r\"/home/yk/Project/keras/dataset/BraTs19DataSet/BraTS_2019_Validation_HM_Norm_719\"\r\n # path = r\"/home/yk/Project/keras/dataset/BraTs19DataSet/BraTs19Mixture_N4_HM_Norm/all\"\r\n path = r\"/home/yk/Project/keras/dataset/BraTs19DataSet/BraTS_2019_Validation_HM_Norm_719\"\r\n # w_p = r\"./checkpoint_813/model3d_818_01.h5\"\r\n w_p = r\"./checkpoint_813/model3d_aug_819_01.h5\"\r\n prediction = PredictCase()\r\n prediction.processing(w_p, path)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "ResNet3d/prediction_final_version.py", "file_name": "prediction_final_version.py", "file_ext": "py", "file_size_in_byte": 16121, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 88, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 89, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 111, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.transpose", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 128, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 151, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 154, "usage_type": "call"}, {"api_name": 
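get_patch_from_array in the prediction class repeats the same centered-crop arithmetic per axis, and its second and third assert blocks re-test axis 0 by mistake (every assert indexes Center_coordinate[0] and shape[0]). A generic sketch of the same crop, assuming the BraTS volume shape (240, 240, 155):

import numpy as np

def center_patch(volume, patch_shape=(128, 128, 128)):
    """Return the sub-volume of patch_shape centred in volume."""
    slices = []
    for dim, p in zip(volume.shape, patch_shape):
        c = dim // 2
        assert 0 <= c - p // 2 and c + p // 2 <= dim, "Out of size range"
        slices.append(slice(c - p // 2, c + p // 2))
    return volume[tuple(slices)]

patch = center_patch(np.zeros((240, 240, 155), dtype=np.float32))
print(patch.shape)   # (128, 128, 128)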
"ResNet3D.patch_utils.get_patch_from_array_around_ranch", "line_number": 161, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 161, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 176, "usage_type": "call"}, {"api_name": "nibabel.Nifti1Image", "line_number": 192, "usage_type": "call"}, {"api_name": "nibabel.save", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 230, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 271, "usage_type": "call"}, {"api_name": "ResNet3D.ResUNet3D.ResUNet", "line_number": 275, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path", "line_number": 281, "usage_type": "attribute"}, {"api_name": "ResNet3D.patch_utils.fuse_array2complete_matrix", "line_number": 301, "usage_type": "call"}, {"api_name": "ResNet3D.patch_utils.fuse_array2complete_matrix", "line_number": 307, "usage_type": "call"}, {"api_name": "ResNet3D.patch_utils.fuse_array2complete_matrix", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 319, "usage_type": "call"}]} +{"seq_id": "563237027", "text": "from django.shortcuts import render\nfrom ..serializers import RecruiterSerializer\nfrom rest_framework.decorators import api_view,permission_classes,authentication_classes\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.response import Response\n\nfrom ..models import Recruiter,User\nfrom rest_framework.authtoken.models import Token\n@api_view(['GET'])\ndef AllRecruiters(request):\n recruiters=Recruiter.objects.all()\n serializer=RecruiterSerializer(recruiters,many=True)\n return Response(serializer.data)\n@api_view(['GET'])\ndef RecruiterById(request):\n idRecruiter=request.GET.get('id')\n recruiter=Recruiter.objects.get(id=idRecruiter)\n serializer=RecruiterSerializer(recruiter,many=False)\n return Response(serializer.data)\n@api_view(['POST'])\ndef CreateRecruiter(request):\n serializer=RecruiterSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n user=User.objects.get(external_id=serializer.data['id'])\n token, created = Token.objects.get_or_create(user=user)\n return Response({\n 'token': token.key,\n 'id': user.external_id,\n 'email': user.email,\n 'is_intern':user.is_intern,\n 'is_recruiter':user.is_recruiter\n })\n else:\n return Response('Not Saved')\n@api_view(['POST'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef UpdateRecruiter(request):\n recruiterId=request.data['id']\n recruiter=Recruiter.objects.get(id=recruiterId)\n serializer=RecruiterSerializer(instance=recruiter,data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response('Updated')\n else:\n return Response('Something went Wrong')\n \n \n", "sub_path": "ProjectSFE/BackEnd/careerapi/career/views/RecruiterView.py", "file_name": "RecruiterView.py", "file_ext": "py", "file_size_in_byte": 1790, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "models.Recruiter.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Recruiter.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.Recruiter", "line_number": 11, "usage_type": "name"}, {"api_name": 
"serializers.RecruiterSerializer", "line_number": 12, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 13, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 9, "usage_type": "call"}, {"api_name": "models.Recruiter.objects.get", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Recruiter.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Recruiter", "line_number": 17, "usage_type": "name"}, {"api_name": "serializers.RecruiterSerializer", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 14, "usage_type": "call"}, {"api_name": "serializers.RecruiterSerializer", "line_number": 22, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 25, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 25, "usage_type": "name"}, {"api_name": "rest_framework.authtoken.models.Token.objects.get_or_create", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.models.Token.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "rest_framework.authtoken.models.Token", "line_number": 26, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Recruiter.objects.get", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Recruiter.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.Recruiter", "line_number": 41, "usage_type": "name"}, {"api_name": "serializers.RecruiterSerializer", "line_number": 42, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 45, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 47, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.decorators.authentication_classes", "line_number": 37, "usage_type": "call"}, {"api_name": "rest_framework.authentication.TokenAuthentication", "line_number": 37, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "421582887", "text": "from openpyxl import load_workbook\nimport ldap3\nfrom ldap3 import Server, Connection, SUBTREE, ALL, NTLM\n\n\ndomain = raw_input('User Domain: ')\n\n#openpyxl load worksheet\ninputFilename = raw_input('enter input file name: ')\nwb = load_workbook(inputFilename)\nws = wb.active #0 is default\n\nmailColumn = raw_input('enter column for email: ')\nsAMAccountColumn = raw_input('enter column for sAMAccount output: ')\nuserNameColumn = raw_input ('enter column for principle name output: ')\n\n#lastRow = int(raw_input('last row: '))\n#ldap config\nif domain.upper() == 'MSNYUHEALTH':\n server_uri = 'SADSINFP317001.msnyuhealth.org'\n #search_base = 'OU=NYEE,DC=msnyuhealth,DC=org'\n search_base = 'DC=msnyuhealth,DC=org'\n login = \"msnyuhealth\\\\ftp-svc\"\nelif domain.upper() == 'NYEE':\n server_uri = 'dc-1.nyee.edu'\n search_base = 'DC=nyee,DC=edu'\n login = 
\"nyee\\\\ftp-svc\"\nattrs = ['sAMAccountName','userPrincipalName']\nserver = Server(server_uri, get_info=ALL)\nconn = Connection(server, user=login, password = \"InfoSys123\",authentication = NTLM)\nconn.bind()\ncurrentRow=2\ncurrentCell = mailColumn + str(currentRow)\nwhile ws[currentCell].value is not None:\n currentCell = mailColumn + str(currentRow)\n samaCell = sAMAccountColumn + str(currentRow)\n userCell = userNameColumn + str(currentRow)\n emailAddress = str(ws[currentCell].value)\n\n #server_uri = 'ldap://msnyuhealth.org'\n\n\n search_filter = '(mail=' + emailAddress + ')'\n #search_filter = '(sAMAccountName=' + emailAddress + ')'\n\n conn.search(search_base,search_filter,search_scope=SUBTREE,attributes=attrs)\n try:\n sAMAccountName = conn.entries[0].sAMAccountName.raw_values[0]\n ws[samaCell] = sAMAccountName\n except:\n ws[samaCell] = \"not found\"\n try:\n userName = conn.entries[0].userPrincipalName.raw_values[0]\n ws[userCell] = userName\n except:\n ws[userCell] = \"not found\"\n currentRow = currentRow + 1\n#outputFilename = raw_input('Enter filename for output: ')\nwb.save(inputFilename)\n", "sub_path": "search_by_email.py", "file_name": "search_by_email.py", "file_ext": "py", "file_size_in_byte": 2019, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 10, "usage_type": "call"}, {"api_name": "ldap3.Server", "line_number": 29, "usage_type": "call"}, {"api_name": "ldap3.ALL", "line_number": 29, "usage_type": "name"}, {"api_name": "ldap3.Connection", "line_number": 30, "usage_type": "call"}, {"api_name": "ldap3.NTLM", "line_number": 30, "usage_type": "name"}, {"api_name": "ldap3.SUBTREE", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "162267122", "text": "\"\"\"\n Drawing methods for polarisation\n Methods of working with polarisation look at controllers.polarisation.py\n\"\"\"\nimport numpy as np\nimport pylab\n\nimport matplotlib.path as mpath\nimport matplotlib.patches as patches\n\nfrom tools import numpy_tool\nfrom tools.generators import Generator\nfrom controllers.polarization import get_param_ellipse_polar, get_str_view_polar_vec\nfrom view.matlab import general\n\n\ndef get_ellipse_points_from_alpha_beta(alpha: float, beta: float, return_point_count: int = 100, is_need_check=True):\n \"\"\"\n alpha - polarization ellipse azimuth. -pi/2 <= alpha <= pi/2\n beta - angle of ellipticity. -pi/4 <= beta <= pi/4\n return_point_count - count of return points om ome axis. Must be more than 3\n \"\"\"\n if is_need_check:\n if not (-np.pi / 2 <= alpha <= np.pi / 2) or \\\n not (-np.pi / 4 <= beta <= np.pi / 4):\n raise ValueError(f\"-pi/2 <= alpha <= pi/2, -pi/4 <= beta <= pi/4. 
Given val alpha = {alpha}, beta = {beta}\")\n if return_point_count <= 1:\n raise ValueError(f\"return_point_count is less than 4 ({return_point_count}).\")\n a = abs(np.cos(beta))\n b = abs(np.sin(beta))\n\n # СДЕЛАТЬ МЕТОД У ПОВЕРХНОСТЕЙ ВОЗВРАЩАЮЩИЙ МАССИВЫ С ТОЧКАМИ ДЛЯ ОТОБРАЖЕНИЯ\n # СДЕЛАТЬ ВРАЩАЮЩУЮСЯ ПОВЕРХНОСТЬ\n # и возвращать вращнный эллипс\n fi = None\n if beta < 0:\n fi = np.linspace(0, 2 * np.pi, return_point_count)\n else:\n fi = np.linspace(2 * np.pi, 0, return_point_count)\n x = a * np.cos(fi)\n y = b * np.sin(fi)\n\n if abs(alpha) > np.finfo(np.float_).eps:\n xy = numpy_tool.reshape_arrays_into_one(x, y)\n xy = np.reshape(xy, (xy.size // 2, 2))\n rot_mat = Generator.get_rot_mat_2d(-alpha)\n xy = np.matmul(xy, rot_mat)\n xy = xy.ravel()\n x, y = numpy_tool.reshape_array_into_many(xy, row_count=2, column_count=return_point_count)\n return x, y\n\n\ndef draw_arrow_to_polarization(x: (np.array, list, tuple), y: (np.array, list, tuple),\n xlim: (np.array, list, tuple), ylim: (np.array, list, tuple),\n beta: float, count_of_arrow_on_ellipse: int = 2):\n if len(xlim) != len(ylim) != 2:\n raise ValueError(f\"Limits must have length equals xlim {xlim}, ylim {ylim}\")\n xy = (xlim[0], ylim[0], xlim[1], ylim[1])\n arrow_width = np.linalg.norm(np.subtract(xy[:2], xy[2:])) * 0.005\n\n if abs(beta) > np.finfo(float).eps:\n d_b_a = len(x) // count_of_arrow_on_ellipse\n for i in range(1, count_of_arrow_on_ellipse + 1):\n dba_i_minus_1 = d_b_a * i - 1\n dx = 0\n dy = 0\n if i != count_of_arrow_on_ellipse:\n dx = x[d_b_a * i] - x[dba_i_minus_1]\n dy = y[d_b_a * i] - y[dba_i_minus_1]\n pylab.arrow(x[dba_i_minus_1], y[dba_i_minus_1], dx, dy,\n length_includes_head=True, width=arrow_width)\n else:\n dx = x[len(x) - 1] - x[len(x) - 2]\n dy = y[len(x) - 1] - y[len(x) - 2]\n pylab.arrow(x[len(x) - 2], y[len(x) - 2], dx, dy, length_includes_head=True, width=arrow_width)\n else:\n # min/max with index search\n indexes = [0, 0]\n x_val = [x[0], x[0]]\n for i in range(1, len(x)):\n if x[indexes[0]] > x[i]:\n x_val[0] = x[i]\n indexes[0] = i\n if x[indexes[1]] < x[i]:\n x_val[1] = x[i]\n indexes[1] = i\n y_val = [y[indexes[0]], y[indexes[1]]]\n for i in range(2):\n dx = 0\n dy = 0\n if indexes[i] + 1 >= len(x):\n dx = x_val[i] - x[indexes[i] - 1]\n dy = y_val[i] - y[indexes[i] - 1]\n x_val[i] = x[indexes[i] - 1]\n y_val[i] = y[indexes[i] - 1]\n else:\n dx = x[indexes[i] + 1] - x_val[i]\n dy = y[indexes[i] + 1] - y_val[i]\n\n pylab.arrow(x_val[i], y_val[i], dx, dy, length_includes_head=True, width=arrow_width)\n\n\ndef draw_polar_ellipse(vec: (np.array, list, tuple), count_drawing_point: int = 100, count_of_arrow_on_ellipse: int = 2,\n fp: int = 2, title: str = \"\", float_dtype=np.float64, **kwargs):\n \"\"\"\n Drawing polarization for given Jonson vector.\n vec - Jonson vector is two dimensional vector with complex numbers\n count_drawing_point - count of drawing point. 
Not recommended to use value less than 10.\n fp - float precision\n title - title of figure\n float_dtype - numpy type of float data ()\n\n\n \"\"\"\n if not (float_dtype in (np.float32, np.float64, np.complex64, np.complex128)):\n float_dtype = np.float64\n alpha_, beta = get_param_ellipse_polar(vec, float_dtype=float_dtype)\n x, y = get_ellipse_points_from_alpha_beta(alpha_, beta, is_need_check=False, return_point_count=count_drawing_point)\n color = kwargs.setdefault(\"color\", \"blue\")\n\n print(alpha_, beta)\n xlim, ylim = general.focus_on_without_cutting((-1, 1), (-1, 1), x, y, 0.1)\n pylab.xlim(*xlim)\n pylab.ylim(*ylim)\n labelstr = (f\"alpha = %.{fp}f,beta = %.{fp}f)\") % (alpha_, beta)\n pylab.plot(x, y, color=color, label=labelstr)\n if not title:\n formatstr = get_str_view_polar_vec(vec, fp, float_dtype=float_dtype)\n pylab.title(formatstr)\n else:\n pylab.title(title)\n pylab.legend()\n draw_arrow_to_polarization(x, y, xlim, ylim, beta, count_of_arrow_on_ellipse)\n\n\ndef get_rotated_triangles(deg_angle: int, rot_matrix=None):\n \"\"\"\n Make rotated triangles path for given rotating angle(degrees)\n \"\"\"\n half = 0.5\n verts = (\n ( # up triangle arrow\n (half, -half), # right, bottom\n (0., half), # center, top\n (-half, -half) # right, top\n ),\n ( # down triangle arrow\n (half, half), # right, top\n (0., -half), # center, bottom\n (-half, half) # left, top\n )\n )\n\n codes = [\n mpath.Path.MOVETO,\n mpath.Path.LINETO,\n mpath.Path.LINETO,\n ]\n if rot_matrix is None:\n deg_angle = int(deg_angle)\n rot_matrix = Generator.get_rot_mat2d_cashed_int(-deg_angle)\n\n rot_verts = np.matmul(verts, rot_matrix)\n paths = [mpath.Path(rot_verts[ii], codes) for ii in range(2)] # 2 - len(verts)\n return paths\n\n\ndef draw_ellipce_polar(ax, jones_vec, center, scale_coeff=1, color=\"blue\", markersize=None, fix_matplotlib=False):\n \"\"\"\n :param ax axes of matplotlib figure\n :param jones_vec 2 dimensional complex vector\n :param center of polarization ellipse\n :param scale_coeff scale all of plotting objects by this funciton\n :param color matplotlib color (str)\n :param markersize size of triangles, whitch viewed direction of rotation (float)\n :param fix_matplotlib set True then ellipse draw behind grid or autoscaling doesn't work (auto limits)\n fix_matplotlib call this 2 methods\n ax.set_axisbelow(True)\n ax.autoscale() # manually recalculate limits\n \"\"\"\n alpha, beta = get_param_ellipse_polar(jones_vec)\n energy = np.sqrt(np.sum(np.abs(jones_vec)))\n w_h = np.asarray(\n (energy * scale_coeff * np.cos(beta), # energy * scale_coeff * np.cos(beta)\n energy * scale_coeff * np.sin(beta)) # energy * scale_coeff * np.sin(beta)\n ) # width, height. 
2 because width = 2a, heigth = 2b\n w_h = np.abs(w_h)\n angle = np.rad2deg(alpha) # 30 #\n\n # rotation markers\n line_points = np.asarray(((w_h[0], 0),\n (-w_h[0], 0)))\n rot_mat = Generator.get_rot_mat_2d(-alpha)\n line_points = np.matmul(line_points, rot_mat)\n line_points = line_points + center\n w_h = w_h * 2\n # print(\"w_h \", w_h)\n\n paths_marker = get_rotated_triangles(angle, rot_mat)\n if markersize is not None:\n markersize = markersize * scale_coeff\n\n # check it\n rng_marker = range(2) if beta < 0 else range(1, -1, -1) # 2 - count of markers\n for ii, jj in zip(range(2), rng_marker):\n line = pylab.Line2D( # put same coord to draw marker\n [line_points[ii, 0]] * 2, # x coords\n [line_points[ii, 1]] * 2, # y coords\n marker=paths_marker[jj], color=color,\n markersize=markersize\n )\n line.set_markevery(3)\n ax.add_line(line)\n\n el = patches.Ellipse(center, *w_h, angle=angle, fill=False, color=color)\n # print(el)\n ax.add_artist(el)\n # el.set_zorder(1000) # equvalent to ax.set_axisbelow(True)\n\n borders_coeff = 1.1 * scale_coeff\n sqr_side = np.max(w_h) * borders_coeff # scale coeff\n # print(np.max(w_h))\n sqr_sides = [sqr_side] * 2\n sqr_side = sqr_side / 2\n rect_vertex = np.subtract(center, (sqr_side, sqr_side))\n # print(center,sqr_sides)\n rect = patches.Rectangle(rect_vertex, *sqr_sides, fill=False, color=color)\n # print(rect)\n # ax.add_artist(rect)\n # rect.set_zorder(1000) # equvalent to ax.set_axisbelow(True)\n if fix_matplotlib:\n ax.set_axisbelow(True)\n ax.autoscale() # manually say to recalculate limits\n", "sub_path": "view/matlab/polarization.py", "file_name": "polarization.py", "file_ext": "py", "file_size_in_byte": 9307, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.pi", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.finfo", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.float_", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tools.numpy_tool.reshape_arrays_into_one", "line_number": 44, "usage_type": "call"}, {"api_name": "tools.numpy_tool", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 45, "usage_type": "call"}, {"api_name": "tools.generators.Generator.get_rot_mat_2d", "line_number": 46, "usage_type": "call"}, {"api_name": "tools.generators.Generator", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.matmul", "line_number": 47, "usage_type": "call"}, {"api_name": "tools.numpy_tool.reshape_array_into_many", "line_number": 49, "usage_type": "call"}, {"api_name": "tools.numpy_tool", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 59, "usage_type": "call"}, 
{"api_name": "numpy.linalg", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.subtract", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.finfo", "line_number": 61, "usage_type": "call"}, {"api_name": "pylab.arrow", "line_number": 70, "usage_type": "call"}, {"api_name": "pylab.arrow", "line_number": 75, "usage_type": "call"}, {"api_name": "pylab.arrow", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.complex64", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.complex128", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 116, "usage_type": "attribute"}, {"api_name": "controllers.polarization.get_param_ellipse_polar", "line_number": 117, "usage_type": "call"}, {"api_name": "view.matlab.general.focus_on_without_cutting", "line_number": 122, "usage_type": "call"}, {"api_name": "view.matlab.general", "line_number": 122, "usage_type": "name"}, {"api_name": "pylab.xlim", "line_number": 123, "usage_type": "call"}, {"api_name": "pylab.ylim", "line_number": 124, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 126, "usage_type": "call"}, {"api_name": "controllers.polarization.get_str_view_polar_vec", "line_number": 128, "usage_type": "call"}, {"api_name": "pylab.title", "line_number": 129, "usage_type": "call"}, {"api_name": "pylab.title", "line_number": 131, "usage_type": "call"}, {"api_name": "pylab.legend", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.path.Path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "matplotlib.path", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "matplotlib.path", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "matplotlib.path", "line_number": 157, "usage_type": "name"}, {"api_name": "tools.generators.Generator.get_rot_mat2d_cashed_int", "line_number": 161, "usage_type": "call"}, {"api_name": "tools.generators.Generator", "line_number": 161, "usage_type": "name"}, {"api_name": "numpy.matmul", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.path.Path", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 164, "usage_type": "name"}, {"api_name": "controllers.polarization.get_param_ellipse_polar", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.rad2deg", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 191, "usage_type": "call"}, {"api_name": "tools.generators.Generator.get_rot_mat_2d", "line_number": 193, "usage_type": "call"}, 
{"api_name": "tools.generators.Generator", "line_number": 193, "usage_type": "name"}, {"api_name": "numpy.matmul", "line_number": 194, "usage_type": "call"}, {"api_name": "pylab.Line2D", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.patches.Ellipse", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 215, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 227, "usage_type": "name"}]} +{"seq_id": "276759738", "text": "#! /usr/bin/env python3\r\r\n\r\r\nimport numpy as np\r\r\nimport matplotlib.pyplot as plt\r\r\nfrom sklearn.datasets import load_iris\r\r\nfrom sklearn.metrics.cluster import normalized_mutual_info_score\r\r\nfrom sklearn.decomposition import PCA\r\r\n\r\r\n# Fetch dataset from sklearn and initialize dataset-dependent variables to be used in kmeans\r\r\ndataset = load_iris()\r\r\nx = dataset.data # Samples represented in feature space\r\r\ny = dataset.target # Ground-truth class for each sample\r\r\nn_samples = x.shape[0] # Number of samples in the dataset\r\r\nn_features = x.shape[1] # Dimension of samples' feature space\r\r\nn_classes = len(set(y)) # Number of ground-truth classes (i.e., number of unique values in y)\r\r\n\r\r\n# Hyperparameters\r\r\nn_clusters = 4 # Number of clusters to consider (e.g., equal to the number of ground-truth classes)\r\r\nn_runs = 1 # Number of time k-means will be run, and from which average performance will be computed\r\r\n\r\r\naverage_nmi = 0\r\r\n\r\r\nfor run in range(0, n_runs):\r\r\n # Initialize cluster centroids\r\r\n # indices = np.random.randint(n_samples, size = n_clusters) # Draw n_clusters samples without replacement\r\r\n indices = np.random.choice(n_samples, size = n_clusters, replace=False) # Draw n_clusters samples without replacement\r\r\n c = [x[i] for i in indices] # Use these n_clusters samples to initialize the centroids\r\r\n\r\r\n # K-Means algorithm\r\r\n diff = 1 # Number of assignments changed between successive iterations\r\r\n assignments = np.zeros(n_samples) # Cluster assignment for each sample\r\r\n while diff > 0:\r\r\n # Assign samples to clusters, based on nearest cluster centroid\r\r\n assignments_old = np.array(assignments) # Save old assignments to compute diff\r\r\n for i in range(0, n_samples):\r\r\n nearest_centroid = 0 # Index of the centroid which is the nearest from current sample x[i]\r\r\n min = float(\"inf\") # Distance between current sample x[i, :] and nearest centroid\r\r\n for j in range(0, n_clusters):\r\r\n dist = np.linalg.norm(x[i] - c[j]) # Distance between sample x[i] and centroid c[j]\r\r\n if dist < min:\r\r\n min = dist\r\r\n nearest_centroid = j\r\r\n assignments[i] = nearest_centroid\r\r\n diff = sum(assignments - assignments_old) # Is 0 iff successive assignments are unchanged\r\r\n\r\r\n # Update centroids\r\r\n for j in range(0, n_clusters):\r\r\n x_assigned_j = [x[i] for i, assignment in enumerate(assignments) if assignment == j] # Samples assigned to cluster j\r\r\n if len(x_assigned_j):\r\r\n c[j] = 1.0/len(x_assigned_j)*sum(x_assigned_j) # Cluster centroid is updated as the average of its assigned samples\r\r\n else:\r\r\n # Otherwise don't update it\r\r\n print(\"Cluster vide, centroïde inchangé.\")\r\r\n #print(assignments) # Print the cluster assignments for 
all samples\r\r\n\r\r\n # Evaluate the obtained clustering based on the ground-truth classes\r\r\n nmi = normalized_mutual_info_score(assignments, y)\r\r\n print(\"NMI pour l'exécution {}: {:.2f}\".format(run, nmi))\r\r\n average_nmi = average_nmi + nmi\r\r\n\r\r\n # Compare the obtained clustering and ground-truth classes by visualizing data in 2D space using PCA\r\r\n pca = PCA(n_components=2)\r\r\n x_r = pca.fit(x).transform(x) # x after dimension reduction based on a 2-component PCA\r\r\n\r\r\n # Visualization for ground-truth classes\r\r\n colors = ['navy', 'turquoise', 'darkorange']\r\r\n target_names = dataset.target_names # Labels for iris species\r\r\n plt.figure()\r\r\n for color, i, target_name in zip(colors, [0, 1, 2], target_names):\r\r\n plt.scatter(x_r[y == i, 0], x_r[y == i, 1], color=color, label=target_name)\r\r\n plt.legend(loc='best')\r\r\n plt.title('Partition réelle sur la collection Iris')\r\r\n\r\r\n # Visualization for the clustering obtained by k-means\r\r\n k = len(set(assignments))\r\r\n print(k)\r\r\n target_names = range(k) # Obtained cluster labels\r\r\n plt.figure()\r\r\n for i, target_name in zip(range(k), target_names):\r\r\n plt.scatter(x_r[assignments == i, 0], x_r[assignments == i, 1],\r\r\n label=target_name)\r\r\n plt.legend(loc='best')\r\r\n plt.title('Partition obtenue par k-means sur la collection Iris')\r\r\n\r\r\n plt.show()\r\r\n\r\r\nprint(\"NMI moyenne: {:.2f}\".format(average_nmi/n_runs))", "sub_path": "ALGO/tp_k_moyenne/correction_kmeans.py", "file_name": "correction_kmeans.py", "file_ext": "py", "file_size_in_byte": 4375, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sklearn.datasets.load_iris", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 77, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.cluster.normalized_mutual_info_score", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": 
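The assignment and update steps above run as pure-Python loops over every sample and centroid. A vectorized equivalent using numpy broadcasting (an alternative sketch, not part of the original exercise code) is:

import numpy as np

def kmeans_step(x, c):
    # dists[i, j] = ||x[i] - c[j]||, via broadcasting (n, 1, d) - (1, k, d)
    c = np.asarray(c)
    dists = np.linalg.norm(x[:, None, :] - c[None, :, :], axis=2)
    assignments = dists.argmin(axis=1)
    # Each centroid becomes the mean of its members; empty clusters are kept.
    new_c = np.array([x[assignments == j].mean(axis=0)
                      if np.any(assignments == j) else c[j]
                      for j in range(len(c))])
    return assignments, new_c

x = np.random.rand(150, 4)
c = x[np.random.choice(len(x), 4, replace=False)]
for _ in range(10):
    assignments, c = kmeans_step(x, c)

Convergence can then be tested with np.array_equal on successive assignment vectors, which avoids the cancellation problem of summing signed differences.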
"name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}]} +{"seq_id": "501976042", "text": "import serial\r\nimport time\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\ndef main():\r\n ser = serial.Serial('COM3',9600,timeout=1)\r\n #for i in range(100):\r\n # ser.readline()\r\n ser.readline()\r\n time_np = np.zeros(1000,float)\r\n start = time.time() \r\n while 1:\r\n \r\n data=[]\r\n for i in range(1000):\r\n #ser.write(b' ')\r\n # time.sleep(0.001)\r\n \r\n sample=ser.readline().strip()\r\n data.append( int(sample))\r\n end = time.time()\r\n time_np[i] = time.time() - start\r\n # output = float(data.strip()) #\".strip\" : '\\n' , '\\r' = ' '\r\n # print(output)\r\n print(np.max(time_np))\r\n \r\n data_np=np.array(data).T\r\n plt.xlabel(\"time[s]\")\r\n plt.ylabel(\"Voltage\")\r\n plt.plot(time_np,data_np,'-.',label='Mic_0')\r\n plt.legend()\r\n plt.show()\r\n\r\n start = time.time()\r\n\r\n ser.close()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "serial_read_real_Mic_1.py", "file_name": "serial_read_real_Mic_1.py", "file_ext": "py", "file_size_in_byte": 1027, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "serial.Serial", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "time.time", "line_number": 14, "usage_type": "call"}, {"api_name": "time.time", "line_number": 24, "usage_type": "call"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "155503263", "text": "import sys\nimport os.path\n##sys.path.append('C:/Users/Shaohan/PycharmProjects/ScienceProject/ScienceProject/catalog/SFM/train')\n##sys.path.append('C:/Users/Shaohan/PycharmProjects/ScienceProject/ScienceProject/catalog/SFM/train/science_data')\nfrom .build import build_model\nimport numpy as np\nimport argparse\nimport time\nimport pickle\n\n##np.set_printoptions(threshold=np.nan)\nnp.set_printoptions(threshold=1000000000000)\n\ndef predict(X_value, model_path = os.path.join(os.path.dirname(__file__),'science_data/weights3660.hdf5') , hidden_dim = 50 , freq_dim = 4, learning_rate = 0.0001):\n fn = os.path.join(os.path.dirname(__file__),'config/data')\n 
with open(fn,'rb') as f:\n        max_data = pickle.load(f)\n        min_data = pickle.load(f)\n\n    model = build_model([1, hidden_dim, 1], freq_dim, learning_rate)\n    # loading model\n    model.load_weights(model_path)\n\n    # prediction\n    #print('> Predicting... ')\n    X_value = np.reshape(X_value,(X_value.shape[0],1))\n    X_value = (2 * X_value - (max_data + min_data)) / (max_data - min_data)\n    X_value = np.reshape(X_value,(X_value.shape[0], 1, 1))\n    predicted = model.predict(X_value)\n    # denormalization\n    prediction = (predicted[:, :, 0] * (max_data - min_data) + (max_data + min_data)) / 2\n    prediction = np.reshape(prediction,prediction.shape[0])\n\n    return prediction\n\ndef getdiff(prediction, realdata, step):\n\n    diff_data = prediction - realdata\n    anomally_each = np.load(\"../dataset/science_anomally_each.npy\")\n    anomally_each = anomally_each[:,step:]\n    anomally_all = np.load(\"../dataset/science_anomaly_all.npy\")\n    anomally_all = anomally_all[step:]\n\n    high_limit = np.zeros((len(diff_data)), dtype=np.int32)\n    low_limit = np.zeros((len(diff_data)), dtype=np.int32)\n    for i in range(len(diff_data)):\n        diff_dict = {}\n        for j in range(len(diff_data[i])):\n            diff_dict[diff_data[i][j]] = anomally_each[i][j]\n        keys = sorted(diff_dict.keys())\n        max_radio = 0\n        high_limit_i = 0\n        min_radio = 0\n        low_limit_i = 0\n        for j,key in enumerate(keys):\n            less = [ 1 for k,v in diff_dict.items() if k<=key and v==1 ]\n            less_radio = sum(less) / (j+1)\n            more = [ 1 for k,v in diff_dict.items() if k>=key and v==1 ]\n            more_radio = sum(more) / (len(keys)-j)\n            if(less_radio >= min_radio):\n                min_radio = less_radio\n                low_limit_i = key\n            if( more_radio > max_radio):\n                max_radio = more_radio\n                high_limit_i = key\n        low_limit[i] = int(low_limit_i)\n        high_limit[i] = int(high_limit_i)\n\n    with open('./config/limit', 'wb') as f:\n        pickle.dump(high_limit, f)\n        pickle.dump(low_limit, f)\n\n\n# Main Run Thread\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    # n-step prediction\n    parser.add_argument('-s', '--step', type=int, default=1)\n    # data path\n    parser.add_argument('-d', '--data_file', type=str, default='../dataset/science_data.npy')\n    # dimension\n    parser.add_argument('-hd', '--hidden_dim', type=int, default=50)\n    parser.add_argument('-f', '--freq_dim', type=int, default=4)\n    # training parameter\n    parser.add_argument('-n', '--niter', type=int, default=4000)\n    parser.add_argument('-ns', '--nsnapshot', type=int, default=20)\n    parser.add_argument('-lr', '--learning_rate', type=float, default=0.0001)\n\n    args = parser.parse_args()\n    step = args.step\n\n    global_start_time = time.time()\n\n    #print('> Loading data... ')\n\n    data_file = args.data_file\n    X_train, y_train, X_val, y_val, X_test, y_test, gt_test, max_data, min_data = build.load_data(data_file, step)\n    train_len = X_train.shape[1]\n    val_len = X_val.shape[1] - X_train.shape[1]\n    test_len = X_test.shape[1] - X_val.shape[1]\n\n    \"\"\"\n    print('> Data Loaded. Compiling...')\n    model = build.build_model([1, args.hidden_dim, 1], args.freq_dim, args.learning_rate)\n    best_error = np.inf\n    best_epoch = 0\n    best_iter = 0\n\n\n    start = time.time()\n    for ii in range(int(args.niter / args.nsnapshot)):\n        model.fit(\n            X_train,\n            y_train,\n            batch_size=50,\n            nb_epoch=args.nsnapshot,\n            validation_split=0)\n\n        num_iter = str(args.nsnapshot * (ii + 1))\n        model.save_weights('./science_data/weights{}.hdf5'.format(num_iter), overwrite=True)\n\n        predicted = model.predict(X_train)\n        train_error = np.sum((predicted[:, :, 0] - y_train[:, :, 0]) ** 2) / (predicted.shape[0] * predicted.shape[1])\n\n        print(num_iter, ' training error ', train_error)\n\n        predicted = model.predict(X_val)\n        print(\"predicted shape: \",predicted.shape)\n        val_error = np.sum((predicted[:, -val_len:, 0] - y_val[:, -val_len:, 0]) ** 2) / (val_len * predicted.shape[0])\n\n        print(' val error ', val_error)\n\n        if (val_error < best_error):\n            best_error = val_error\n            best_iter = args.nsnapshot * (ii + 1)\n\n    end = time.time()\n    print('Training duration (s) : ', time.time() - global_start_time)\n    print('best iteration ', best_iter)\n    print('smallest error ', best_error)\n    print('train time(s) : ', end - start)\n\n\n    model = build.build_model([1, args.hidden_dim, 1], args.freq_dim, args.learning_rate)\n    # loading model\n    model_path = './science_data/weights{}.hdf5'.format(3660)\n    model.load_weights(model_path)\n    # prediction\n    print('> Predicting... ')\n    start = time.time()\n    predicted = model.predict(X_test)\n    end = time.time()\n    # denormalization\n    prediction = (predicted[:, :, 0] * (max_data - min_data) + (max_data + min_data)) / 2\n\n    error = np.sum((prediction[:, -test_len:] - gt_test[:, -test_len:]) ** 2) / (test_len * prediction.shape[0])\n    print('The mean square error is: %f' % error)\n    print('predict time: ', end - start)\n    getdiff(prediction, gt_test , 1)\n\n\n    tpr, fpr, auc = caulcute_tf(prediction,gt_test,test_len)\n    all_data_tf = np.zeros((2,len(tpr)),dtype=np.float32)\n    all_data_tf[0] = tpr\n    all_data_tf[1] = fpr\n    np.save('./Roc/SFM_ROC', all_data_tf)\n\n\n    x = np.arange(0,prediction.shape[1]+step,1)\n    with open(\"../dataset/nodename.pkl\",\"rb\") as f:\n        nodenames = pickle.load(f)\n\n    for ii in range(0, len(prediction)):\n        plt.figure(facecolor='white')\n        ax = plt.subplot(1,1,1)\n        plt.xticks(fontsize=10)\n        plt.yticks(fontsize=10)\n        plt.plot( x[0:-step], gt_test[ii, :], color='orangered', lw = 2 , label='Original Data')\n        plt.plot( x[step:] , prediction[ii, :], color='orangered', linestyle = '--' ,lw = 2 ,label='Prediction Data')\n        #if ii == len(prediction)-1:\n        ax.set_xlabel('TIMESTEPS', fontsize=10)\n        ax.set_ylabel(nodenames[ii], fontsize=10)\n        ax.legend(loc=2, fontsize=10)\n\n        isExists = os.path.exists('./prediction')\n        if not isExists:\n            # Create the directory if it does not exist\n            os.makedirs('./prediction')\n        plt.savefig('./prediction/{}.pdf'.format(nodenames[ii]))\n        plt.show()\n    \"\"\"\n\n\n\n\n", "sub_path": "realtime/SFM/train/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 7181, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.set_printoptions", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 14, "usage_type": "call"}, 
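predict() above maps inputs into [-1, 1] with the training-time extrema and inverts the mapping after the network runs. The round trip in isolation (helper names are mine, not the repo's):

import numpy as np

def scale(x, lo, hi):
    # Map x from [lo, hi] onto [-1, 1], as done before model.predict above.
    return (2 * x - (hi + lo)) / (hi - lo)

def unscale(y, lo, hi):
    # Inverse mapping, as applied to the network output above.
    return (y * (hi - lo) + (hi + lo)) / 2

x = np.array([3.0, 5.0, 7.0])
assert np.allclose(unscale(scale(x, 3.0, 7.0), 3.0, 7.0), x)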
{"api_name": "os.path.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "build.build_model", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 71, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 72, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 77, "usage_type": "call"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "build.load_data", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "156087595", "text": "import pygame\nimport time\nimport random\n\npygame.init()\n\nwhite = (255, 255, 255)\ngrey = (200, 200, 200)\nyellow = (255, 255, 102)\nblack = (0, 0, 0)\nred = (213, 50, 80)\ngreen = (0, 255, 0)\nblue = (50, 153, 213)\n\ndis_width = 1200\ndis_height = 600\n\ndis = pygame.display.set_mode((dis_width, dis_height))\npygame.display.set_caption('Snake Game by Edureka')\n\nclock = pygame.time.Clock()\n\nsnake_block = 10\nsnake_speed = 10\n\nfont_style = pygame.font.SysFont(\"bahnschrift\", 25)\nscore_font = pygame.font.SysFont(\"comicsansms\", 14)\n\n\nclass Snake:\n def __init__(self, snumber):\n self.Length_of_snake = 1\n self.snake_List = []\n self.x1 = 20\n self.y1 = 100\n self.x1_change = 0\n self.y1_change = 0\n self.snake_Head = []\n self.isAlive = True\n if snumber == 1:\n self.color = red\n else:\n self.color = black\n self.x1 = 20\n self.y1 = 120\n self.score = 0\n\n def draw(self):\n for x in self.snake_List:\n pygame.draw.rect(dis, self.color, [x[0], x[1], snake_block, snake_block])\n\n def get_info(self, snumber):\n value = score_font.render(\"Player \" + str(snumber) + \" score: \" + str(self.score), True, yellow)\n dis.blit(value, [0, (snumber - 1) * 20])\n if (self.isAlive == False):\n value = score_font.render(\" DIED \", True, red)\n dis.blit(value, [110, (snumber - 1) * 20])\n\n def check_walls(self):\n if self.x1 >= dis_width or self.x1 < 0 or self.y1 >= dis_height or self.y1 < 0:\n return True\n return False\n\n def check_head(self):\n for x in self.snake_List[:-1]:\n if x == self.snake_Head:\n return True\n\n def change_vals(self):\n self.x1 += self.x1_change\n self.y1 += self.y1_change\n\n def add_head(self):\n self.snake_Head = []\n self.snake_Head.append(self.x1)\n self.snake_Head.append(self.y1)\n self.snake_List.append(self.snake_Head)\n if len(self.snake_List) > self.Length_of_snake:\n del self.snake_List[0]\n\n def is_beats_wall(self, level):\n for wall in level_walls[level]:\n if self.x1 >= wall[\"start_x\"] and self.y1 >= 
wall[\"start_y\"] and self.x1 <= wall[\"start_x\"] + wall[\n \"weight_x\"] and self.y1 <= wall[\"start_y\"] + wall[\"weight_y\"]:\n self.x1 = random.randrange(0, wall[\"start_x\"])\n self.y1 = random.randrange(0, wall[\"start_y\"])\n return True\n return False\n\n\ndef cross_snakes(snake1, snake2):\n if snake1.isAlive == True:\n for x in snake2.snake_List:\n if snake1.x1 == x[0] and snake1.y1 == x[1]:\n return 1\n if snake2.isAlive == True:\n for x in snake1.snake_List:\n if snake2.x1 == x[0] and snake2.y1 == x[1]:\n return 2\n return 0\n\n\ndef message(msg, color):\n mesg = font_style.render(msg, True, color)\n dis.blit(mesg, [dis_width / 6, dis_height / 3])\n\n\ndef check_food_for_wall(level, foodx, foody):\n for wall in level_walls[level]:\n if foodx >= wall[\"start_x\"] + snake_block and foody >= wall[\"start_y\"] - snake_block and foodx <= wall[\n \"start_x\"] + wall[\"weight_x\"] + snake_block and foody <= wall[\"start_y\"] + wall[\"weight_y\"] + snake_block:\n foodx = random.randrange(0, wall[\"start_x\"] - snake_block)\n foody = random.randrange(0, wall[\"start_y\"] - snake_block)\n return [foodx, foody]\n return [foodx, foody]\n\n\nwall1 = {\n \"start_x\": 150,\n \"start_y\": 150,\n \"weight_x\": 300,\n \"weight_y\": 100\n}\n\nwall2 = {\n \"start_x\": 150,\n \"start_y\": 350,\n \"weight_x\": 300,\n \"weight_y\": 100\n}\nwall3 = {\n \"start_x\": 800,\n \"start_y\": 100,\n \"weight_x\": 200,\n \"weight_y\": 400\n}\nwall4 = {\n \"start_x\": 100,\n \"start_y\": 100,\n \"weight_x\": 50,\n \"weight_y\": 300\n}\nwall5 = {\n \"start_x\": 220,\n \"start_y\": 220,\n \"weight_x\": 300,\n \"weight_y\": 100\n}\nwall6 = {\n \"start_x\": 600,\n \"start_y\": 100,\n \"weight_x\": 50,\n \"weight_y\": 300\n}\nwall7 = {\n \"start_x\": 900,\n \"start_y\": 100,\n \"weight_x\": 50,\n \"weight_y\": 300\n}\nlevel_walls = {\n 1: [wall1, wall2],\n 2: [wall1, wall2, wall3],\n 3: [wall4, wall5, wall6, wall7]} # walls for levels\n\nfor wall in level_walls[1]:\n print(\"wall in level 1\", wall)\n\n\ndef gameLoop(lvl):\n game_over = False\n game_close = False\n\n level = lvl\n if lvl != 0:\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n coords = check_food_for_wall(level, foodx, foody)\n foodx = coords[0]\n foody = coords[1]\n print(coords)\n snake_speed = 10 + lvl * 2\n player1 = Snake(1)\n player2 = Snake(2)\n print(dis_width / 2 + 50, dis_width / 2 + 50, snake_block * 5, snake_block)\n while not game_over:\n while game_close == True:\n dis.fill(blue)\n if player1.score > player2.score:\n message(\n \"Player 1 wins! \" + str(player1.score) + \":\" + str(player2.score) + \" Press C-Play Again or Q-Quit\",\n yellow)\n elif player1.score < player2.score:\n message(\n \"Player 2 wins! \" + str(player1.score) + \":\" + str(player2.score) + \" Press C-Play Again or Q-Quit\",\n yellow)\n else:\n message(\"Equal powers! 
Press C-Play Again or Q-Quit\", yellow)\n\n pygame.display.update()\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n game_over = True\n game_close = False\n if event.key == pygame.K_c:\n gameLoop(0)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n game_over = True\n game_close = False\n if event.key == pygame.K_c:\n gameLoop(0)\n if event.key == pygame.K_LEFT:\n player1.x1_change = -snake_block\n player1.y1_change = 0\n elif event.key == pygame.K_RIGHT:\n player1.x1_change = snake_block\n player1.y1_change = 0\n elif event.key == pygame.K_UP:\n player1.y1_change = -snake_block\n player1.x1_change = 0\n elif event.key == pygame.K_DOWN:\n player1.y1_change = snake_block\n player1.x1_change = 0\n elif event.key == pygame.K_a:\n player2.x1_change = -snake_block\n player2.y1_change = 0\n elif event.key == pygame.K_d:\n player2.x1_change = snake_block\n player2.y1_change = 0\n elif event.key == pygame.K_w:\n player2.y1_change = -snake_block\n player2.x1_change = 0\n elif event.key == pygame.K_s:\n player2.y1_change = snake_block\n player2.x1_change = 0\n elif level == 0 and event.key == pygame.K_1:\n level = 1\n gameLoop(1)\n elif level == 0 and event.key == pygame.K_2:\n level = 2\n gameLoop(2)\n elif level == 0 and event.key == pygame.K_3:\n level = 3\n gameLoop(3)\n\n if level == 0:\n dis.fill(blue)\n message(\"Please choose level. Easy press - 1, Medium press - 2, Hard press - 3\", yellow)\n pygame.display.update()\n continue\n\n if player1.check_walls():\n player1.isAlive = False\n\n if player2.check_walls():\n player2.isAlive = False\n\n if player1.isAlive:\n player1.change_vals()\n if player2.isAlive:\n player2.change_vals()\n dis.fill(blue)\n pygame.draw.rect(dis, green, [foodx, foody, snake_block, snake_block])\n\n if player1.isAlive:\n player1.add_head()\n if player2.isAlive:\n player2.add_head()\n\n for wall in level_walls[level]:\n pygame.draw.rect(dis, grey, [wall[\"start_x\"], wall[\"start_y\"], wall[\"weight_x\"], wall[\"weight_y\"]])\n\n player1.draw()\n player2.draw()\n\n player1.get_info(1)\n player2.get_info(2)\n\n if player1.check_head():\n player1.isAlive = False\n\n if player2.check_head():\n player2.isAlive = False\n\n cros = cross_snakes(player1, player2)\n if cros == 1:\n player1.isAlive = False\n elif cros == 2:\n player2.isAlive = False\n\n if player1.is_beats_wall(level) == True:\n player1.isAlive = False\n\n if player2.is_beats_wall(level) == True:\n player2.isAlive = False\n\n pygame.display.update()\n\n if player1.isAlive == False and player2.isAlive == False:\n game_close = True\n\n if player1.x1 == foodx and player1.y1 == foody:\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n player1.Length_of_snake += 1\n player1.score += 1\n\n if player2.x1 == foodx and player2.y1 == foody:\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n player2.Length_of_snake += 1\n player2.score += 1\n\n clock.tick(snake_speed)\n\n pygame.quit()\n quit()\n\n\ngameLoop(0)", "sub_path": "week11/7.py", "file_name": "7.py", "file_ext": "py", "file_size_in_byte": 9996, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pygame.init", "line_number": 5, 
"usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 50, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 85, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 86, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 112, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 113, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 176, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 177, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 200, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 200, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 202, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 202, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 203, "usage_type": "attribute"}, {"api_name": "pygame.K_q", "line_number": 204, "usage_type": "attribute"}, {"api_name": "pygame.K_c", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 210, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 210, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 211, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 213, "usage_type": "attribute"}, {"api_name": "pygame.K_q", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pygame.K_c", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 219, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 222, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 228, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 231, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 234, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 237, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 240, "usage_type": "attribute"}, {"api_name": "pygame.K_1", "line_number": 243, "usage_type": "attribute"}, {"api_name": "pygame.K_2", "line_number": 246, "usage_type": "attribute"}, {"api_name": "pygame.K_3", "line_number": 249, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 256, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 256, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 270, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 270, "usage_type": "attribute"}, 
{"api_name": "pygame.draw.rect", "line_number": 278, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 278, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 304, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 304, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 310, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 311, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 316, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 317, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 323, "usage_type": "call"}]} +{"seq_id": "359335978", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn import model_selection\nfrom time import sleep\nfrom sklearn import neighbors\nimport smbus #import SMBus module of I2C #import\nimport os\nimport RPi.GPIO as GPIO\nimport time\nimport firebase_admin,pyttsx3\nfrom firebase_admin import credentials\nfrom firebase_admin import db\ntouch = 2\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(18,GPIO.OUT)\nGPIO.setup(touch,GPIO.IN,pull_up_down=GPIO.PUD_UP)\n#some MPU6050 Registers and their Address\nPWR_MGMT_1 = 0x6B\nSMPLRT_DIV = 0x19\n\nCONFIG = 0x1A\nGYRO_CONFIG = 0x1B\nINT_ENABLE = 0x38\nACCEL_XOUT_H = 0x3B\nACCEL_YOUT_H = 0x3D\nACCEL_ZOUT_H = 0x3F\nBCCEL_XOUT_H = 0x3B\nBCCEL_YOUT_H = 0x3D\nBCCEL_ZOUT_H = 0x3F\nDCCEL_XOUT_H = 0x3B\nDCCEL_YOUT_H = 0x3D\nDCCEL_ZOUT_H = 0x3F\nGYRO_XOUT_H = 0x43\nGYRO_YOUT_H = 0x45\nGYRO_ZOUT_H = 0x47\n\nMORSE_CODE_DICT = { 'A':'.-', 'B':'-...',\n 'C':'-.-.', 'D':'-..', 'E':'.',\n 'F':'..-.', 'G':'--.', 'H':'....',\n 'I':'..', 'J':'.---', 'K':'-.-',\n 'L':'.-..', 'M':'--', 'N':'-.',\n 'O':'---', 'P':'.--.', 'Q':'--.-',\n 'R':'.-.', 'S':'...', 'T':'-',\n 'U':'..-', 'V':'...-', 'W':'.--',\n 'X':'-..-', 'Y':'-.--', 'Z':'--..',\n '1':'.----', '2':'..---', '3':'...--',\n '4':'....-', '5':'.....', '6':'-....',\n '7':'--...', '8':'---..', '9':'----.',\n '0':'-----'}\n\ndef database():\n # Fetch the service account key JSON file contents\n cred = credentials.Certificate('/home/pi/Downloads/oops-49b91-firebase-adminsdk-hwhh0-9db96f5a05.json')\n # Initialize the app with a service account, granting admin privileges\n firebase_admin.initialize_app(cred, {'databaseURL': 'https://oops-49b91.firebaseio.com/'})\n ref = db.reference('oops')\n print(ref)\n print(type(ref))\n if type(ref.get())=='firebase_admin.db.Reference':\n return 0\n d = dict(ref.get())\n text = next(iter(d))\n vibit = d[text]['studentName']\n speak(vibit)\n vibration(convert(vibit.upper()))\n ref.delete()\n\ndef speak(text):\n engine = pyttsx3.init()\n engine.setProperty('rate',150)\n engine.setProperty('volume',1)\n k= text\n engine.say(k)\n engine.runAndWait()\nfirebase_admin.db.Reference\nimport speech_recognition as sr\ndef record():\n print('Speak Now !!')\n r = sr.Recognizer()\n with sr.Microphone() as source:\n audio = r.listen(source)\n try:\n print(r.recognize_google(audio))\n vibrate(convert(r.recognize_google(audio)))\n #print(\"system predicts:\"+r.recognize_google(audio))\n except Exception:\n print(\"Something went wrong !!\")\n return str(r.recognize_google(audio))\n\n\ndef convert(message):\n dummy = ''\n for letter in message:\n if letter != ' ':\n letter.upper()\n dummy += MORSE_CODE_DICT[letter]+' '\n else:\n dummy += ' '\n return dummy\n\ndef robot(text):\n os.system(\"espeak'\"+text+\"'\")\n\ndef morse_to_text(message):\n message 
+= ' '\n\n decipher = ''\n citext = ''\n for letter in message:\n \n \n if (letter != ' '):\n\n i = 0\n\n citext += letter\n \n \n else:\n \n i += 1\n \n \n if i == 2 :\n \n \n decipher += ' '\n else:\n \n \n decipher += list(MORSE_CODE_DICT.keys())[list(MORSE_CODE_DICT\n .values()).index(citext)]\n citext = ''\n\n return decipher\n\n\ndef vibration(result):\n n=0\n while n0):\n generated_morse_code=generated_morse_code+\".\"\n elif(g>0.25 and g<1):\n generated_morse_code=generated_morse_code+\"-\"\n elif(g>1 and g<3):\n generated_morse_code=generated_morse_code+\" \"\n elif(g>4):\n break\n gmc=generated_morse_code\n print(gmc)\n tt = morse_to_text(gmc)\n print(tt)\n speak(tt)\n\n\ndef MPU_Init():\n #write to sample rate register\n bus.write_byte_data(Device_Address, SMPLRT_DIV, 7)\n \n #Write to power management register\n bus.write_byte_data(Device_Address, PWR_MGMT_1, 1)\n \n #Write to Configuration register\n bus.write_byte_data(Device_Address, CONFIG, 0)\n \n #Write to Gyro configuration register\n bus.write_byte_data(Device_Address, GYRO_CONFIG, 24)\n \n #Write to interrupt enable register\n bus.write_byte_data(Device_Address, INT_ENABLE, 1)\ndef read_raw_data(addr):\n #Accelero and Gyro value are 16-bit\n high = bus.read_byte_data(Device_Address, addr)\n low = bus.read_byte_data(Device_Address, addr+1)\n #concatenate higher and lower value\n value = ((high << 8) | low)\n #to get signed value from mpu6050\n if(value > 32768):\n value = value - 65536\n return value\n\ndef ML():\n global bus,Device_Address\n while True:\n tf=pd.read_csv('gesturedataset.csv')\n x=tf.iloc[:, :-1].values\n y=tf.iloc[:, -1].values\n X_train,X_test,y_train,y_test=model_selection.train_test_split(x,y,test_size=0.5)\n knn=neighbors.KNeighborsClassifier(n_neighbors=1)\n knn.fit(X_train,y_train)\n i=0\n list1 =[]\n sleep(1.5)\n print('start')\n while i<=9:\n bus = smbus.SMBus(4) # or bus = smbus.SMBus(0) for older version boards\n Device_Address = 0x68 # MPU6050 device address\n MPU_Init()\n acc_x = read_raw_data(ACCEL_XOUT_H)\n acc_y = read_raw_data(ACCEL_YOUT_H)\n acc_z = read_raw_data(ACCEL_ZOUT_H)\n #Full scale range +/- 250 degree/C as per sensitivity scale factor\n Ax = int(acc_x/10)\n Ay = int(acc_y/10)\n Az = int(acc_z/10)\n list1.extend([Ax,Ay,Az])\n i=i+1\n sleep(0.3)\n print(list1)\n l=np.array(list1).reshape(1,30)\n k=knn.predict(l)\n print(knn.predict_proba(l))\n print(k)\n return(k)\n\ndef main():\n g = 0\n while True:\n sleep(2)\n if ML() == 'WR':\n g +=1\n if g<2:\n database()\n else:\n re_me=record()\n a=convert(re_me.upper())\n vibration(a)\n elif (ML() == 'Water'):\n re_me=record()\n a=convert(re_me.upper())\n vibration(a)\n else:\n read_touchsensor() \n \n# Executes the main function\nif __name__ == '__main__':\n main() \n\n", "sub_path": "morse.py", "file_name": "morse.py", "file_ext": "py", "file_size_in_byte": 7608, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "RPi.GPIO.setmode", "line_number": 15, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 15, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 15, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setwarnings", "line_number": 16, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 16, "usage_type": "name"}, {"api_name": "RPi.GPIO.setup", "line_number": 17, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 17, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 17, "usage_type": 
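The convert/morse_to_text pair above uses one space between letters and two between words. A standalone sketch of a round trip under that convention, with a reduced dictionary for brevity:

MORSE = {'S': '...', 'O': '---'}
REVERSE = {v: k for k, v in MORSE.items()}

def encode(text):
    # One space between letters, two between words.
    return '  '.join(' '.join(MORSE[ch] for ch in word)
                     for word in text.upper().split())

def decode(code):
    return ' '.join(''.join(REVERSE[sym] for sym in word.split())
                    for word in code.split('  '))

msg = encode('SOS SOS')        # '... --- ...  ... --- ...'
assert decode(msg) == 'SOS SOS'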
"attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 18, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 18, "usage_type": "name"}, {"api_name": "RPi.GPIO.IN", "line_number": 18, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PUD_UP", "line_number": 18, "usage_type": "attribute"}, {"api_name": "firebase_admin.credentials.Certificate", "line_number": 55, "usage_type": "call"}, {"api_name": "firebase_admin.credentials", "line_number": 55, "usage_type": "name"}, {"api_name": "firebase_admin.initialize_app", "line_number": 57, "usage_type": "call"}, {"api_name": "firebase_admin.db.reference", "line_number": 58, "usage_type": "call"}, {"api_name": "firebase_admin.db", "line_number": 58, "usage_type": "name"}, {"api_name": "pyttsx3.init", "line_number": 71, "usage_type": "call"}, {"api_name": "firebase_admin.db", "line_number": 77, "usage_type": "attribute"}, {"api_name": "speech_recognition.Recognizer", "line_number": 81, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 82, "usage_type": "call"}, {"api_name": "os.system", "line_number": 104, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 144, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 144, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 144, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 146, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 147, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 147, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 147, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 148, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 151, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 151, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 151, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 153, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 154, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 154, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 154, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 155, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 159, "usage_type": "call"}, {"api_name": "RPi.GPIO.input", "line_number": 167, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 167, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 171, "usage_type": "call"}, {"api_name": "RPi.GPIO.input", "line_number": 172, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 172, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 222, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 225, "usage_type": "call"}, {"api_name": "sklearn.model_selection", "line_number": 225, "usage_type": "name"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 226, "usage_type": "call"}, {"api_name": "sklearn.neighbors", "line_number": 226, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 230, "usage_type": "call"}, {"api_name": "smbus.SMBus", "line_number": 233, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 247, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "64799978", "text": "# Copyright 
(c) 2016-2020 InSeven Limited\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport atexit\nimport logging\nimport os\nimport signal\nimport subprocess\nimport sys\nimport threading\nimport time\nimport urllib\nimport webbrowser\n\nimport watchdog.events\nimport watchdog.observers\n\nimport incontext\nimport paths\n\n\nclass CallbackEventHandler(watchdog.events.FileSystemEventHandler):\n    \"\"\"Invokes the supplied callback for every captured filesystem event.\"\"\"\n\n    def __init__(self, callback):\n        self._callback = callback\n\n    def on_moved(self, event):\n        super(CallbackEventHandler, self).on_moved(event)\n        self._callback()\n\n    def on_created(self, event):\n        self._callback()\n\n    def on_deleted(self, event):\n        self._callback()\n\n    def on_modified(self, event):\n        self._callback()\n\n\ndef watch_directory(paths, callback):\n    observer = watchdog.observers.Observer()\n    for path in paths:\n        observer.schedule(CallbackEventHandler(callback=callback), path, recursive=True)\n    observer.start()\n    return observer\n\n\nclass Builder(threading.Thread):\n\n    def __init__(self, incontext, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.incontext = incontext\n        self.scheduled = False\n        self.lock = threading.Lock()\n        self.stopped = False\n\n    def schedule(self):\n        logging.info(\"Scheduling build...\")\n        with self.lock:\n            self.scheduled = True\n\n    def stop(self):\n        logging.info(\"Stopping builder...\")\n        with self.lock:\n            self.stopped = True\n\n    def run(self):\n        while True:\n            time.sleep(1)\n            scheduled = False\n            with self.lock:\n                if self.stopped:\n                    return\n                scheduled = self.scheduled\n                self.scheduled = False\n            if scheduled:\n                try:\n                    self.incontext.commands[\"build\"]()\n                    logging.info(\"Done.\")\n                except Exception as e:\n                    logging.error(\"Failed: %s\", e)\n\n\ndef docker(command):\n    prefix = []\n    if sys.platform == \"linux\":\n        prefix = [\"sudo\"]\n    return subprocess.run(prefix + [\"docker\"] + command)\n\n\n@incontext.command(\"watch\", help=\"watch for changes and automatically build the website\")\ndef command_watch(incontext, options):\n    builder = Builder(incontext)\n    builder.start()\n    logging.info(\"Watching directory...\")\n    observer = watch_directory([incontext.configuration.site.paths.content,\n                                incontext.configuration.site.paths.templates],\n                               builder.schedule)\n    logging.info(\"Performing initial build...\")\n    builder.schedule()\n    try:\n        while True:\n            time.sleep(0.2)\n    except KeyboardInterrupt:\n        builder.stop()\n        observer.stop()\n        observer.join()\n        builder.join()\n\n\n@incontext.command(\"serve\", help=\"serve a 
local copy of the site using a Docker nginx container\")\ndef command_serve(incontext, parser):\n container = \"incontext-nginx\"\n docker([\"rm\", \"--force\", container])\n docker([\"run\", \"--name\", container,\n \"--restart\", \"always\",\n \"-d\",\n \"-p\", \"80:80\",\n \"-v\", f\"{incontext.configuration.site.destination.files_directory}:/usr/share/nginx/html\",\n \"-v\", f\"{os.path.join(paths.ROOT_DIR, 'nginx.conf')}:/etc/nginx/conf.d/default.conf\",\n \"nginx\"])\n", "sub_path": "plugins/serve.py", "file_name": "serve.py", "file_ext": "py", "file_size_in_byte": 4428, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "watchdog.events.events", "line_number": 39, "usage_type": "attribute"}, {"api_name": "watchdog.events", "line_number": 39, "usage_type": "name"}, {"api_name": "watchdog.events.observers.Observer", "line_number": 60, "usage_type": "call"}, {"api_name": "watchdog.events.observers", "line_number": 60, "usage_type": "attribute"}, {"api_name": "watchdog.events", "line_number": 60, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 67, "usage_type": "attribute"}, {"api_name": "threading.Lock", "line_number": 73, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 77, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 82, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 88, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 98, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 100, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 105, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 107, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 114, "usage_type": "call"}, {"api_name": "incontext.configuration", "line_number": 115, "usage_type": "attribute"}, {"api_name": "incontext.configuration", "line_number": 116, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 118, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 122, "usage_type": "call"}, {"api_name": "incontext.command", "line_number": 110, "usage_type": "call"}, {"api_name": "incontext.configuration", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "paths.ROOT_DIR", "line_number": 139, "usage_type": "attribute"}, {"api_name": "incontext.command", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "235582031", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n:py:mod:`data.py` - Download routines\n-------------------------------------\n\nThese are routines for downloading and storing the raw `K2` data, as well\nas information about planet candidates and eclipsing binaries.\n\n'''\n\nfrom __future__ import division, print_function, absolute_import, unicode_literals\nfrom .config import EVEREST_DAT, EVEREST_SRC\nfrom .crowding import Contamination\nfrom .sources import GetSources, Source\nfrom .utils import MedianFilter, Chunks\nimport k2plr as kplr\nfrom k2plr.config import KPLR_ROOT\nimport numpy as np\nimport re\nimport os\nimport sys\nimport six\nfrom six.moves import urllib\nfrom tempfile import NamedTemporaryFile\nimport shutil\ntry:\n import pyfits\nexcept ImportError:\n try:\n import astropy.io.fits as pyfits\n except ImportError:\n raise Exception('Please install the `pyfits` 
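The Builder above wakes once per second to poll a flag under a lock. An event-driven variant with the same coalescing behaviour (my sketch, not the plugin's implementation) blocks until a change arrives and lets a short quiet period absorb bursts:

import threading
import time

class DebouncedBuilder(threading.Thread):
    # Coalesces bursts of change notifications into single rebuilds.
    def __init__(self, build, quiet_period=1.0):
        super().__init__(daemon=True)
        self._build = build
        self._dirty = threading.Event()
        self._stop_evt = threading.Event()   # not named _stop: Thread uses that name internally
        self._quiet = quiet_period

    def schedule(self):
        self._dirty.set()

    def stop(self):
        self._stop_evt.set()
        self._dirty.set()                    # wake the loop so it can exit

    def run(self):
        while not self._stop_evt.is_set():
            self._dirty.wait()               # block until a change arrives
            if self._stop_evt.is_set():
                return
            self._dirty.clear()
            self._stop_evt.wait(self._quiet) # interruptible sleep: let the burst settle
            if not self._stop_evt.is_set():
                self._build()

builder = DebouncedBuilder(lambda: print('rebuild'))
builder.start()
builder.schedule(); builder.schedule()       # a burst of events -> one rebuild
time.sleep(2)
builder.stop(); builder.join()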
package.')\nimport logging\nlog = logging.getLogger(__name__)\n\nfloat_ = lambda x: float(x) if x != '' else np.nan\n\nadapter = {\n 'rowid': str,\n 'epic_name': str,\n 'tm_name': str,\n 'epic_candname': str,\n 'pl_name': str,\n 'k2c_refdisp': str,\n 'k2c_reflink': str,\n 'k2c_disp': str,\n 'k2c_note': str,\n 'k2_campaign': str,\n 'k2c_recentflag': str,\n 'ra_str': str,\n 'ra': str,\n 'dec_str': str,\n 'dec': str,\n 'pl_orbper': float_,\n 'pl_orbpererr1': float_,\n 'pl_orbpererr2': float_,\n 'pl_orbperlim': float_,\n 'pl_tranmid': float_,\n 'pl_tranmiderr1': float_,\n 'pl_tranmiderr2': float_,\n 'pl_tranmidlim': float_,\n 'pl_trandep': float_,\n 'pl_trandeperr1': float_,\n 'pl_trandeperr2': float_,\n 'pl_trandeplim': float_,\n 'pl_trandur': float_,\n 'pl_trandurerr1': float_,\n 'pl_trandurerr2': float_,\n 'pl_trandurlim': float_,\n 'pl_imppar': float_,\n 'pl_impparerr1': float_,\n 'pl_impparerr2': float_,\n 'pl_impparlim': float_,\n 'pl_orbincl': float_,\n 'pl_orbinclerr1': float_,\n 'pl_orbinclerr2': float_,\n 'pl_orbincllim': float_,\n 'pl_ratdor': float_,\n 'pl_ratdorerr1': float_,\n 'pl_ratdorerr2': float_,\n 'pl_ratdorlim': float_,\n 'pl_ratror': float_,\n 'pl_ratrorerr1': float_,\n 'pl_ratrorerr2': float_,\n 'pl_ratrorlim': float_,\n 'pl_rade': float_,\n 'pl_radeerr1': float_,\n 'pl_radeerr2': float_,\n 'pl_radelim': float_,\n 'pl_radj': float_,\n 'pl_radjerr1': float_,\n 'pl_radjerr2': float_,\n 'pl_radjlim': float_,\n 'pl_eqt': float_,\n 'pl_eqterr1': float_,\n 'pl_eqterr2': float_,\n 'pl_eqtlim': float_,\n 'pl_fppprob': float_,\n 'pl_fppproblim': float_,\n 'st_plx': float_,\n 'st_plxerr1': float_,\n 'st_plxerr2': float_,\n 'st_plxlim': float_,\n 'st_dist': float_,\n 'st_disterr1': float_,\n 'st_disterr2': float_,\n 'st_distlim': float_,\n 'st_teff': float_,\n 'st_tefferr1': float_,\n 'st_tefferr2': float_,\n 'st_tefflim': float_,\n 'st_logg': float_,\n 'st_loggerr1': float_,\n 'st_loggerr2': float_,\n 'st_logglim': float_,\n 'st_metfe': float_,\n 'st_metfeerr1': float_,\n 'st_metfeerr2': float_,\n 'st_metfelim': float_,\n 'st_metratio': str,\n 'st_rad': float_,\n 'st_raderr1': float_,\n 'st_raderr2': float_,\n 'st_radlim': float_,\n 'st_vsini': float_,\n 'st_vsinierr1': float_,\n 'st_vsinierr2': float_,\n 'st_vsinilim': float_,\n 'st_kep': float_,\n 'st_keperr': float_,\n 'st_keplim': float_,\n 'st_bj': float_,\n 'st_bjerr': float_,\n 'st_bjlim': float_,\n 'st_vj': float_,\n 'st_vjerr': float_,\n 'st_vjlim': float_,\n 'st_us': float_,\n 'st_userr': float_,\n 'st_uslim': float_,\n 'st_gs': float_,\n 'st_gserr': float_,\n 'st_gslim': float_,\n 'st_rs': float_,\n 'st_rserr': float_,\n 'st_rslim': float_,\n 'st_is': float_,\n 'st_iserr': float_,\n 'st_islim': float_,\n 'st_zs': float_,\n 'st_zserr': float_,\n 'st_zslim': float_,\n 'st_j2': float_,\n 'st_j2err': float_,\n 'st_j2lim': float_,\n 'st_h2': float_,\n 'st_h2err': float_,\n 'st_h2lim': float_,\n 'st_k2': float_,\n 'st_k2err': float_,\n 'st_k2lim': float_,\n 'st_wise1': float_,\n 'st_wise1err': float_,\n 'st_wise1lim': float_,\n 'st_wise2': float_,\n 'st_wise2err': float_,\n 'st_wise2lim': float_,\n 'st_wise3': float_,\n 'st_wise3err': float_,\n 'st_wise3lim': float_,\n 'st_wise4': float_,\n 'st_wise4err': float_,\n 'st_wise4lim': float_,\n 'st_bmvj': float_,\n 'st_bmvjerr': float_,\n 'st_bmvjlim': float_,\n 'st_jmh2': float_,\n 'st_jmh2err': float_,\n 'st_jmh2lim': float_,\n 'st_hmk2': float_,\n 'st_hmk2err': float_,\n 'st_hmk2lim': float_,\n 'st_jmk2': float_,\n 'st_jmk2err': float_,\n 'st_jmk2lim': float_\n}\n\nclass 
k2data(object):\n    '''\n    A generic `K2` data container. Nothing fancy here.\n    \n    '''\n    \n    pass\n\ndef GetK2Data(EPIC, apnum = 15, delete_kplr_data = True, clobber = False,\n              calculate_contamination = True, use_k2sff_aperture = True):\n    '''\n    Download and save a single quarter of `K2` data.\n    \n    :param int EPIC: The 9-digit `EPIC` number of the target\n    \n    :param apnum: The number of the aperture in the `K2SFF <https://archive.stsci.edu/prepds/k2sff/>`_ \\\\\n                  fits file to use for the photometry. Default `15`\n    :type apnum: int\n    \n    :param delete_kplr_data: Delete the fits file downloaded with :py:mod:`kplr` \\\\\n                             after processing it? Default `True`\n    :type delete_kplr_data: bool\n    \n    :param clobber: Overwrite existing `.npz` file? Default `False`\n    :type clobber: bool\n    \n    :param use_k2sff_aperture: Use apertures determined by the K2SFF team? Default `True`\n    :type use_k2sff_aperture: bool\n    \n    :returns: \n      A :class:`k2data` object containing the following attributes:\n      \n      - **campaign** - The `K2` campaign the target was observed in\n      - **time** - The array of timestamps, in `BJD - 2454833`\n      - **cadn** - The long cadence number corresponding to each observation\n      - **fpix** - A 3-dimensional array of shape `(nt, nx, ny)` containing the \\\\\n        raw flux in the pixel at position `(x, y)` at each timestamp `t`\n      - **perr** - The standard error on each of the data points in `fpix`\n      - **apertures** - An array containing the 20 aperture images obtained from the \\\\\n        `K2SFF <https://archive.stsci.edu/prepds/k2sff/>`_ fits files\n      - **aperture** - *Deprecated*\n      - **bkg** - An estimate of the background flux at each cadence\n      - **bkgerr** - The standard error on `bkg`\n      - **kepmag** - The `Kepler` magnitude of the target\n      - **planets** - A list of :class:`K2Planets` objects containing known planets or \\\\\n        planet candidates for this target\n      - **EB** - `False` if target is not an eclipsing binary; otherwise, a :class:`K2EB` \\\\\n        object containing EB info taken from the `Villanova <http://keplerebs.villanova.edu/>`_ \\\\\n        eclipsing binary catalog\n      - **nearby** - A list of :class:`everest.sources.Source` instances containing \\\\\n        other `EPIC` targets within or close to this target's aperture\n      - **contamination** - An estimate of the median contamination metric for the default \\\\\n        aperture\n    \n    '''\n    \n    filename = os.path.join(KPLR_ROOT, 'data', 'everest', str(EPIC), str(EPIC) + '.npz')\n    \n    if not clobber:\n        try:\n            data = np.load(filename)\n            raw_time = data['raw_time']\n            raw_cadn = data['raw_cadn']\n            time = data['time']\n            fpix = data['fpix']\n            perr = data['perr']\n            campaign = data['campaign']\n            aperture = data['aperture']\n            cadn = data['cadn']\n            _nearby = data['nearby']\n            nearby = [Source(**s) for s in _nearby]\n            fitsheader = data['fitsheader']\n            apertures = data['apertures']\n            contamination = data['contamination'][()]\n            clobber = False \n        except:\n            clobber = True\n    \n    if not clobber:\n        if (not (type(contamination) is float)) and calculate_contamination:\n            apidx = np.where(apertures[apnum] & 1 & ~np.isnan(fpix[0])) \n            bkidx = np.where(apertures[apnum] ^ 1) \n            contamination = Contamination(EPIC, fpix, perr, apidx, bkidx, nearby)\n            np.savez_compressed(filename, time = time, fpix = fpix, perr = perr, cadn = cadn,\n                                aperture = aperture, nearby = _nearby, campaign = campaign,\n                                apertures = apertures, fitsheader = fitsheader,\n                                contamination = contamination, raw_time = raw_time,\n                                raw_cadn = raw_cadn) \n    \n    if clobber:\n        if not os.path.exists(os.path.join(KPLR_ROOT, 'data', 'everest', str(EPIC))):\n            os.makedirs(os.path.join(KPLR_ROOT, 'data', 'everest', str(EPIC)))\n        \n        # Get the TPF\n        client = kplr.API()\n        try:\n            star = 
client.k2_star(EPIC)\n except:\n log.error(\"Oops... The target doesn't seem to be available on MAST!\")\n return None\n \n tpf = star.get_target_pixel_files()[0]\n campaign = tpf.sci_campaign\n with tpf.open() as f:\n aperture = f[2].data\n qdata = f[1].data\n \n try:\n # Grab the K2SFF info, mainly to get the apertures\n if not use_k2sff_aperture:\n raise Exception('')\n k2sff = kplr.K2SFF(EPIC)\n apertures = k2sff.apertures\n except:\n # We will use the TPF optimal aperture (not ideal, since\n # it's smaller). We hack it into the ``apertures`` list.\n k2sff = None\n apertures = [[] for i in range(20)]\n apnew = (aperture & 2) // 2 \n \n # HACK: Make the aperture bigger by including nearest neighbors,\n # but only if there are fewer than 75 pixels.\n apnew_copy = np.array(apnew)\n for i in range(apnew.shape[0]):\n for j in range(apnew.shape[1]):\n if aperture[i][j] == 1:\n for n in [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]:\n if n[0] >= 0 and n[0] < apnew.shape[0]:\n if n[1] >= 0 and n[1] < apnew.shape[1]:\n if apnew[n[0]][n[1]] == 1:\n apnew[i][j] = 1\n # Revert to original if it's too big!\n if np.sum(apnew) > 75:\n apnew = apnew_copy\n for i in range(20):\n apertures[i] = apnew \n \n # Get the arrays\n time = np.array(qdata.field('TIME'), dtype='float64')\n raw_time = np.array(time)\n cadn = np.array(qdata.field('CADENCENO'), dtype='int32')\n raw_cadn = np.array(cadn)\n fpix = np.array(qdata.field('FLUX'), dtype='float64')\n fpix_opt = np.array([f[np.where(aperture & 1)] for f in fpix], dtype='float64')\n rawc = np.array(qdata.field('RAW_CNTS'), dtype='int32')\n perr = np.array(qdata.field('FLUX_ERR'), dtype='float64')\n qual = np.array(qdata.field('QUALITY'), dtype=int)\n colmotion = np.array(qdata.field('POS_CORR1'), dtype='float64')\n rowmotion = np.array(qdata.field('POS_CORR2'), dtype='float64')\n\n # Get bad timestamps\n t_nan_inds = list(np.where(np.isnan(time))[0])\n \n # Get bad flux values\n apidx = np.where(apertures[apnum] & 1 & ~np.isnan(fpix[0])) \n bkidx = np.where(apertures[apnum] ^ 1) \n flux = np.sum(np.array([f[apidx] for f in fpix], dtype='float64'), axis = 1)\n f_nan_inds = list(np.where(np.isnan(flux))[0])\n \n # Make sure we have an aperture!\n if len(apidx[0]) == 0:\n log.error('Oops... The chosen aperture has zero size!')\n return None\n \n # Get flagged data points. Note that like K2SFF, we do not throw out all data\n # points flagged with bit #15, but treat them separately. See \"LDE Flags\" in \n # http://keplerscience.arc.nasa.gov/k2-data-release-notes.html#k2-campaign-2\n bad_bits = [1,2,3,4,5,6,7,8,9,11,12,13,14,16,17] \n qual_inds = []\n for b in bad_bits:\n qual_inds += list(np.where(qual & 2 ** (b - 1))[0])\n \n # Treat bit #15 separately. Only remove a data point flagged with bit 15\n # if it's more than 20% away from the local median.\n med = MedianFilter(flux, 10)\n dev = np.abs((flux - med) / med)\n bit15_inds = list(set(list(np.where(dev > 0.2)[0])) & \\\n set(list(np.where(qual & 2 ** (15 - 1))[0])))\n\n # All the bad inds\n bad_inds = np.array(sorted(list(set(qual_inds + t_nan_inds + f_nan_inds + bit15_inds))))\n\n # Campaign 0 hack. The first half of campaign zero was not in fine\n # pointing. Everest actually does pretty well at detrending it, but I'm \n # actually finding a **time** offset for data before t ~ 1940. For EPIC\n # 202072596 (an EB), the primary eclipses prior to this time are offset\n # by a few cadences -- they happen later than they should. 
No idea why\n # this is happening, but I get *much* better folded eclipses when I remove\n # the first part of C0. Here I simply remove whatever SFF removes.\n if time[0] < 1940. and k2sff is not None:\n bad_inds = np.append(bad_inds, np.where(time < k2sff.time[0]))\n\n # Campaign 2 hack. The first 1-2 days in C2 have very different noise\n # properties than the rest of the campaign, so we'll again trust the SFF\n # cuts.\n if time[0] < 2061 and time[0] > 2059 and k2sff is not None:\n bad_inds = np.append(bad_inds, np.where(time < k2sff.time[0]))\n\n # Remove them\n time = np.delete(time, bad_inds)\n cadn = np.delete(cadn, bad_inds)\n fpix = np.delete(fpix, bad_inds, 0)\n rawc = np.delete(rawc, bad_inds, 0)\n perr = np.delete(perr, bad_inds, 0)\n colmotion = np.delete(colmotion, bad_inds)\n rowmotion = np.delete(rowmotion, bad_inds)\n \n # Get nearby targets\n _nearby = [s.__dict__ for s in GetSources(EPIC)]\n \n # Make it into an object\n nearby = [Source(**s) for s in _nearby]\n \n # Get the contamination\n if calculate_contamination:\n contamination = Contamination(EPIC, fpix, perr, apidx, bkidx, nearby)\n else:\n contamination = None\n \n # Get header info\n ftpf = os.path.join(KPLR_ROOT, 'data', 'k2', 'target_pixel_files', '%d' % EPIC, tpf._filename)\n fitsheader = [pyfits.getheader(ftpf, 0).cards,\n pyfits.getheader(ftpf, 1).cards,\n pyfits.getheader(ftpf, 2).cards]\n \n # Atomically write to disk.\n # http://stackoverflow.com/questions/2333872/atomic-writing-to-file-with-python\n f = NamedTemporaryFile(\"wb\", delete=False)\n np.savez_compressed(f, time = time, fpix = fpix, perr = perr, cadn = cadn,\n aperture = aperture, nearby = _nearby, campaign = campaign,\n apertures = apertures, fitsheader = fitsheader,\n contamination = contamination, raw_time = raw_time,\n raw_cadn = raw_cadn)\n f.flush()\n os.fsync(f.fileno())\n f.close()\n shutil.move(f.name, filename)\n \n # Delete the kplr tpf\n if delete_kplr_data:\n os.remove(ftpf)\n if k2sff is not None:\n os.remove(k2sff._file)\n \n # Get any K2 planets associated with this EPIC\n planets = []\n for planet in GetK2Planets():\n if planet.epic_name == 'EPIC %d' % EPIC:\n if planet.epic_candname not in [p.epic_candname for p in planets]:\n planets.append(planet)\n if len(planets):\n planets = sorted(planets, key = lambda x: x.epic_candname)\n \n # Create the object to return\n res = k2data()\n res.campaign = campaign\n res.time = time\n res.cadn = cadn\n res._raw_time = raw_time\n res._raw_cadn = raw_cadn\n res.fpix = fpix\n res.apertures = apertures\n \n # Compute the background from the median outside the aperture\n binds = np.where(res.apertures[apnum] ^ 1)\n if len(binds[0]) > 0:\n res.bkg = np.nanmedian(np.array([f[binds] for f in fpix], dtype='float64'), axis = 1)\n res.bkgerr = np.nanmedian(np.array([p[binds] for p in perr], dtype='float64'), axis = 1)\n else:\n # Unable to determine the background!\n res.bkg = np.zeros_like(time)\n res.bkgerr = np.zeros_like(time)\n log.warn('Unable to compute the background flux for EPIC %d.' 
% EPIC)\n log.warn('Consider re-running this target with a smaller aperture.')\n \n res.perr = perr\n res.aperture = aperture\n res.planets = planets\n \n # Is this an EB?\n res.EB = False\n for eb in GetK2EBs():\n if eb.epic == EPIC:\n res.EB = eb\n break\n \n # Get the kepler magnitude from the sources in ``nearby``\n # Sometimes the magnitude is not listed in the TPF, but\n # it's in the MAST database...\n foo = [s.kepmag for s in nearby if s.epic == EPIC]\n if len(foo):\n res.kepmag = foo[0]\n else:\n res.kepmag = np.nan\n \n # Get nearby sources\n res._nearby = _nearby\n res.nearby = nearby\n res.contamination = contamination\n \n # Fits header info\n res.fitsheader = fitsheader\n \n return res\n\nclass K2Planet(object):\n '''\n A generic `K2` planet candidate object.\n \n '''\n \n def __init__(self):\n self.epic_candname = \"\"\n \n def __repr__(self):\n return \"<K2Planet: %s>\" % self.epic_candname\n\nclass K2EB(object):\n '''\n A generic `K2` EB candidate object.\n \n '''\n \n def __init__(self):\n self.epic = \"\"\n \n def __repr__(self):\n return \"<K2EB: %s>\" % self.epic\n\ndef Progress(run_name = 'default', campaigns = range(99)):\n '''\n Shows the progress of the de-trending runs for all campaigns.\n \n :param str run_name: The name of the desired run (sub-folder). Default `default`\n :param iterable campaigns: The list of campaigns to check. Default `[0 - 99)`\n \n '''\n \n print(\"CAMP DONE FAIL REMAIN PERC\")\n print(\"---- ---- ---- ------ ----\")\n remain = {}\n for c in campaigns:\n if os.path.exists(os.path.join(EVEREST_DAT, 'output', 'C%02d' % c)):\n path = os.path.join(EVEREST_DAT, 'output', 'C%02d' % c)\n folders = os.listdir(path)\n done = [int(f) for f in folders if os.path.exists(os.path.join(path, f, run_name, '%s.pld' % f))]\n err = [int(f) for f in folders if os.path.exists(os.path.join(path, f, run_name, '%s.err' % f))] \n all = GetK2Campaign(c)\n remain[c] = list(set(all) - set(done) - set(err))\n total = len(all)\n print(\"{:>2d}. {:>10d}{:>10d}{:>10d}{:>10.2f}\".format(c, len(done), len(err), \n total - (len(done) + len(err)), 100 * (len(done) + len(err)) / total))\n for subcampaign in range(10):\n sub = GetK2Campaign(c + 0.1 * subcampaign)\n d = len(set(done) & set(sub))\n e = len(set(err) & set(sub))\n print(\" {:>2d}{:>10d}{:>10d}{:>10d}{:>10.2f}\".format(subcampaign, d, e, \n len(sub) - (d + e), 100 * (d + e) / len(sub)))\n \n return remain\n \ndef GetK2Stars(clobber = False):\n '''\n Download and return a `dict` of all `K2` stars organized by campaign. Saves each\n campaign to a `csv` file in the `everest/tables` directory.\n \n :param bool clobber: If `True`, download and overwrite existing files. Default `False`\n \n '''\n \n # Download\n if clobber:\n client = kplr.API()\n stars = client.k2_star_list()\n for campaign in stars.keys():\n with open(os.path.join(EVEREST_SRC, 'tables', 'C%02d.csv' % campaign), 'w') as f:\n for star in stars[campaign]:\n print(star, file = f)\n \n # Return\n res = {}\n for campaign in range(100):\n f = os.path.join(EVEREST_SRC, 'tables', 'C%02d.csv' % campaign)\n if os.path.exists(f):\n stars = np.loadtxt(f, dtype = int)\n res.update({campaign: stars})\n\n return res\n\ndef Campaign(EPIC):\n '''\n Returns the campaign number for a given EPIC target.\n \n '''\n\n for campaign, stars in GetK2Stars().items():\n if EPIC in stars:\n return campaign\n return None\n\ndef RemoveBackground(EPIC):\n '''\n Returns `True` or `False`, indicating whether or not to remove the background\n flux for the target. 
Currently, if `campaign < 3`, returns\n `True`, otherwise returns `False`.\n \n '''\n\n if Campaign(EPIC) < 3:\n return True\n else:\n return False\n\ndef GetK2Campaign(campaign, clobber = False):\n '''\n Return all stars in a given K2 campaign.\n \n :param campaign: The K2 campaign number. If this is an :py:class:`int`, returns \\\n all targets in that campaign. If a :py:class:`float` in the form \\\n `X.Y`, runs the `Y^th` decile of campaign `X`.\n :param bool clobber: If `True`, download and overwrite existing files. Default `False`\n \n '''\n \n all = GetK2Stars(clobber = clobber)[int(campaign)]\n \n if type(campaign) is int:\n return all\n elif type(campaign) is float:\n x, y = divmod(campaign, 1)\n campaign = int(x)\n subcampaign = round(y * 10)\n return list(Chunks(all, len(all) // 10))[subcampaign]\n else:\n raise Exception('Argument `campaign` must be an `int` or a `float` in the form `X.Y`')\n\ndef GetK2InjectionTestStars(clobber = False):\n '''\n Download and return a list of 2000 `K2` stars, with 200 stars per magnitude \n bin in the range 8-18. These are used for injection tests. The stars are\n saved in `everest/tables/Injections.csv`.\n \n '''\n \n # Download\n if clobber:\n client = kplr.API()\n allstars = client.k2_star_mags(stars_per_mag = 200, mags = range(8,18))\n with open(os.path.join(EVEREST_SRC, 'tables', 'Injections.csv'), 'w') as f:\n for stars in allstars: print(\", \".join([str(s) for s in stars]), file = f)\n \n # Return the flattened list\n stars = np.loadtxt(os.path.join(EVEREST_SRC, 'tables', 'Injections.csv'), \n dtype = int, delimiter = ',')\n return [item for sublist in stars for item in sublist]\n \ndef GetK2Planets():\n '''\n Returns a list of :class:`K2Planet` instances generated from the file\n `/tables/k2candidates.csv`. This file was downloaded from the\n `Exoplanet Archive <http://exoplanetarchive.ipac.caltech.edu>`_\n on February 26, 2016.\n \n '''\n \n # Read the CSV file\n with open(os.path.join(EVEREST_SRC, 'tables', 'k2candidates.csv'), 'r') as f:\n lines = f.readlines()\n\n # Get columns\n columns = [('rowid', 'Row ID')]\n for line in lines:\n regex = re.match(\"# COLUMN (.*):[ X]+(.*)\", line)\n if regex is not None:\n columns.append((regex.groups()[0], regex.groups()[1]))\n\n # Get the data\n planets = []\n for line in lines[len(columns) + 4:]:\n planet = K2Planet()\n line = line.replace('\\n', '')\n entries = line.split(',')\n for c, e in zip(columns, entries):\n setattr(planet, c[0], adapter[c[0]](e)) \n planets.append(planet)\n\n return planets\n\ndef VillanovaBJDOffset(campaign):\n '''\n There's a strange time offset in the `Villanova` EB catalog for \n some campaigns. This function returns the time offset for a given\n campaign, which was determined empirically. These numbers have not\n been thoroughly verified.\n \n '''\n \n if campaign == 0:\n return -54833. # For 202060523, the offset is -54833.279. (!?!?)\n elif campaign == 1:\n return 0. # For 201158453, the offset is -1.94. 
(!?!?)\n elif campaign == 2:\n return -54833.\n elif campaign >= 3:\n return -2454833.\n\nclass EclipseTimes(object):\n '''\n A simple class that determines the times of all eclipses for a given EB.\n \n :param float t0: The time of first eclipse\n :param float period: The period in days\n :param float duration: The eclipse duration in days\n \n :returns: A :py:class:`numpy` array of the times of all transits between `start` \\\n and `stop`\n \n '''\n \n def __init__(self, t0, period, duration):\n '''\n \n '''\n \n self.t0 = t0\n self.period = period\n self.duration = duration\n \n def __call__(self, start, end):\n '''\n\n '''\n if self.duration > 0:\n return np.arange(self.t0 + np.ceil((start - self.duration - self.t0) / self.period) \n * self.period, end + self.duration, self.period)\n else:\n return np.array([], dtype = float)\n\nclass EclipseMask(object):\n '''\n An eclipse masking object for EBs.\n \n :param `EclipseTimes` primary: An instance containing the times of primary eclipse\n :param `EclipseTimes` secondary: An instance containing the times of secondary eclipse\n \n :returns: The indices in `time` that contain the primary and secondary eclipses\n \n '''\n \n def __init__(self, primary, secondary):\n self.primary = primary\n self.secondary = secondary\n \n def __call__(self, time):\n '''\n \n '''\n \n p = []; s = []\n for t in self.primary(time[0], time[-1]): \n p.extend(np.where(np.abs(time - t) < self.primary.duration / 2.)[0])\n for t in self.secondary(time[0], time[-1]): \n s.extend(np.where(np.abs(time - t) < self.secondary.duration / 2.)[0])\n \n return sorted(set(p + s))\n \ndef GetK2EBs(clobber = False):\n '''\n Grab all `K2` EBs from the pre-downloaded `Villanova` catalog, which is stored in\n `everest/tables/k2ebs.tsv`.\n \n :param bool clobber: If `True`, download and overwrite existing files. Default `False`\n \n '''\n \n # Download a new CSV file?\n if clobber or not os.path.exists(os.path.join(EVEREST_SRC, 'tables', 'k2ebs.tsv')):\n url = 'http://keplerebs.villanova.edu/results/?q={\"sort\":\"kic\",' + \\\n '\"campaign\":[\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"],' + \\\n '\"kics\":[],\"etvlong\":true,' + \\\n '\"cols\":[\"camp\",\"bjd0\",\"kic\",\"p\",\"sep\",\"pwidth\",\"swidth\"],' + \\\n '\"etvshort\":true,\"incat1\":true,\"kois\":[]}&format=csv'\n r = urllib.request.Request(url)\n handler = urllib.request.urlopen(r)\n code = handler.getcode()\n if int(code) != 200:\n raise Exception(\"Error downloading Villanova EB data.\")\n data = handler.read()\n f = NamedTemporaryFile(\"wb\", delete=False)\n f.write(data)\n f.flush()\n os.fsync(f.fileno())\n f.close()\n shutil.move(f.name, os.path.join(EVEREST_SRC, 'tables', 'k2ebs.tsv'))\n \n # Read the CSV file\n with open(os.path.join(EVEREST_SRC, 'tables', 'k2ebs.tsv'), 'r') as f:\n lines = f.readlines()\n\n # Create a list of EB objects\n EBs = []\n for line in lines:\n if line.startswith('#') or len(line) < 5:\n continue\n line = line.replace(',\\n', '')\n epic, campaign, period, bjd0, pwidth, swidth, sep = line.split(',')\n \n # Create our EB instance\n EB = K2EB()\n EB.epic = int(epic)\n EB.campaign = int(campaign.replace('K2C', ''))\n EB.period = float(period)\n EB.p0 = float(bjd0) + VillanovaBJDOffset(EB.campaign)\n EB.s0 = EB.p0 + float(sep) * EB.period\n EB.pdur = float(pwidth) * 2 * EB.period\n EB.sdur = float(swidth) * 2 * EB.period\n \n # The Villanova catalog lists some separations as 1\n # and some secondary durations as -1, presumably\n # when a secondary isn't detected. 
We're going to assume\n # the secondary is at primary + per / 2. for plotting\n # purposes, but we won't mask it.\n if float(sep) == 1. or float(swidth) < 0:\n EB.s0 = EB.p0 + EB.period / 2.\n EB.sdur = 0.\n \n # Some of the primary/secondary durations are just\n # pure nonsense -- for EPIC 201160662, for instance, the\n # primary duration + secondary duration equals 1.2 times\n # the period, i.e., primary and secondary overlap a bit\n # and the star is in permanent eclipse (?!?!)\n if EB.pdur + EB.sdur > 0.5 * EB.period:\n # We're just not going to bother with these EBs for now.\n continue\n\n # Get the times of primary eclipse center and secondary eclipse center\n EB.primary = EclipseTimes(EB.p0, EB.period, EB.pdur)\n EB.secondary = EclipseTimes(EB.s0, EB.period, EB.sdur)\n \n # Get the indices during all eclipses (for masking)\n EB.mask = EclipseMask(EB.primary, EB.secondary)\n \n # Append to the list\n EBs.append(EB)\n \n # Now read the user-defined list of updated EBs\n with open(os.path.join(EVEREST_SRC, 'tables', 'k2ebs_updated.tsv'), 'r') as f:\n lines = f.readlines()\n \n for line in lines:\n if line.startswith('#') or len(line) < 5:\n continue\n line = line.replace(',\\n', '')\n epic, campaign, period, bjd0, pwidth, swidth, sep = line.split(',')\n \n # Create our EB instance\n EB = K2EB()\n EB.epic = int(epic)\n EB.campaign = int(campaign.replace('K2C', ''))\n EB.period = float(period)\n EB.p0 = float(bjd0) + VillanovaBJDOffset(EB.campaign)\n EB.s0 = EB.p0 + float(sep) * EB.period\n EB.pdur = float(pwidth) * 2 * EB.period\n EB.sdur = float(swidth) * 2 * EB.period\n \n # The Villanova catalog lists some separations as 1\n # and some secondary durations as -1, presumably\n # when a secondary isn't detected. We're going to assume\n # the secondary is at primary + per / 2. for plotting\n # purposes, but we won't mask it.\n if float(sep) == 1. or float(swidth) < 0:\n EB.s0 = EB.p0 + EB.period / 2.\n EB.sdur = 0.\n \n # Get the times of primary eclipse center and secondary eclipse center\n EB.primary = EclipseTimes(EB.p0, EB.period, EB.pdur)\n EB.secondary = EclipseTimes(EB.s0, EB.period, EB.sdur)\n \n # Get the indices during all eclipses (for masking)\n EB.mask = EclipseMask(EB.primary, EB.secondary)\n \n # Append to the list\n if int(epic) not in [eb.epic for eb in EBs]:\n EBs.append(EB)\n else:\n i = np.argmax(np.array([eb.epic for eb in EBs], dtype = int) == int(epic))\n EBs[i] = EB\n \n return EBs\n\ndef ClearErrors(campaign, run_name = 'default', delete_data = False):\n '''\n Delete all output directories that contain ``.err`` files. If `delete_data`\n is `True`, also deletes the input data. This will force :py:mod:`everest` to\n re-run them.\n \n '''\n \n folder = os.path.join(EVEREST_DAT, 'output', 'C%02d' % campaign)\n count = 0\n if os.path.exists(folder):\n stars = os.listdir(folder)\n for i, EPIC in enumerate(stars):\n sys.stdout.write('\\rProcessing star %d/%d...' 
% (i + 1, len(stars)))\n sys.stdout.flush()\n if os.path.exists(os.path.join(folder, EPIC, run_name, '%s.err' % EPIC)):\n count += 1\n shutil.rmtree(os.path.join(folder, EPIC, run_name))\n if delete_data:\n if os.path.exists(os.path.join(KPLR_ROOT, 'data', 'everest', str(EPIC))):\n shutil.rmtree(os.path.join(KPLR_ROOT, 'data', 'everest', str(EPIC)))\n print(\"\")\n print(\"Deleted %d targets.\" % count)\n ", "sub_path": "everest/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 31179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 245, "usage_type": "call"}, {"api_name": "k2plr.config.KPLR_ROOT", "line_number": 245, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 249, "usage_type": "call"}, {"api_name": "sources.Source", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 270, "usage_type": "call"}, {"api_name": "crowding.Contamination", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 272, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 279, "usage_type": "call"}, {"api_name": "k2plr.config.KPLR_ROOT", "line_number": 279, "usage_type": "argument"}, {"api_name": "os.makedirs", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 280, "usage_type": "call"}, {"api_name": "k2plr.config.KPLR_ROOT", "line_number": 280, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 280, "usage_type": "attribute"}, {"api_name": "k2plr.API", "line_number": 283, "usage_type": "call"}, {"api_name": "k2plr.K2SFF", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 343, "usage_type": "call"}, 
{"api_name": "numpy.where", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 359, "usage_type": "call"}, {"api_name": "utils.MedianFilter", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 364, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 365, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 388, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 389, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 391, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 392, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 394, "usage_type": "call"}, {"api_name": "sources.GetSources", "line_number": 397, "usage_type": "call"}, {"api_name": "sources.Source", "line_number": 400, "usage_type": "call"}, {"api_name": "crowding.Contamination", "line_number": 404, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 409, "usage_type": "call"}, {"api_name": "k2plr.config.KPLR_ROOT", "line_number": 409, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 409, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.getheader", "line_number": 410, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 410, "usage_type": "name"}, {"api_name": "astropy.io.fits.getheader", "line_number": 411, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 411, "usage_type": "name"}, {"api_name": "astropy.io.fits.getheader", "line_number": 412, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 412, "usage_type": "name"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 417, "usage_type": "call"}, {"api_name": "os.fsync", "line_number": 423, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 425, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 429, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 453, "usage_type": "call"}, {"api_name": "numpy.nanmedian", "line_number": 455, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 455, "usage_type": "call"}, {"api_name": "numpy.nanmedian", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 459, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 460, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 482, "usage_type": "attribute"}, 
{"api_name": "os.path.exists", "line_number": 531, "usage_type": "call"}, {"api_name": "os.path", "line_number": 531, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 531, "usage_type": "call"}, {"api_name": "config.EVEREST_DAT", "line_number": 531, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 532, "usage_type": "call"}, {"api_name": "config.EVEREST_DAT", "line_number": 532, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 532, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 533, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 534, "usage_type": "call"}, {"api_name": "os.path", "line_number": 534, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 534, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 535, "usage_type": "call"}, {"api_name": "os.path", "line_number": 535, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 535, "usage_type": "call"}, {"api_name": "k2plr.API", "line_number": 561, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 564, "usage_type": "call"}, {"api_name": "config.EVEREST_SRC", "line_number": 564, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 564, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 571, "usage_type": "call"}, {"api_name": "config.EVEREST_SRC", "line_number": 571, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 571, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 572, "usage_type": "call"}, {"api_name": "os.path", "line_number": 572, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 573, "usage_type": "call"}, {"api_name": "utils.Chunks", "line_number": 621, "usage_type": "call"}, {"api_name": "k2plr.API", "line_number": 635, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 637, "usage_type": "call"}, {"api_name": "config.EVEREST_SRC", "line_number": 637, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 637, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 641, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 641, "usage_type": "call"}, {"api_name": "config.EVEREST_SRC", "line_number": 641, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 641, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 655, "usage_type": "call"}, {"api_name": "config.EVEREST_SRC", "line_number": 655, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 655, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 661, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 722, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 722, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 725, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 749, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 749, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 751, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 751, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 765, "usage_type": "call"}, {"api_name": "os.path", "line_number": 765, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 765, "usage_type": "call"}, {"api_name": "config.EVEREST_SRC", "line_number": 765, "usage_type": "argument"}, {"api_name": 
"six.moves.urllib.request.Request", "line_number": 771, "usage_type": "call"}, {"api_name": "six.moves.urllib.request", "line_number": 771, "usage_type": "attribute"}, {"api_name": "six.moves.urllib", "line_number": 771, "usage_type": "name"}, {"api_name": "six.moves.urllib.request.urlopen", "line_number": 772, "usage_type": "call"}, {"api_name": "six.moves.urllib.request", "line_number": 772, "usage_type": "attribute"}, {"api_name": "six.moves.urllib", "line_number": 772, "usage_type": "name"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 777, "usage_type": "call"}, {"api_name": "os.fsync", "line_number": 780, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 782, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 782, "usage_type": "call"}, {"api_name": "config.EVEREST_SRC", "line_number": 782, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 782, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 785, "usage_type": "call"}, {"api_name": "config.EVEREST_SRC", "line_number": 785, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 785, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 835, "usage_type": "call"}, {"api_name": "config.EVEREST_SRC", "line_number": 835, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 835, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 874, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 874, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 887, "usage_type": "call"}, {"api_name": "config.EVEREST_DAT", "line_number": 887, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 887, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 889, "usage_type": "call"}, {"api_name": "os.path", "line_number": 889, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 890, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 892, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 892, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 893, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 893, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 894, "usage_type": "call"}, {"api_name": "os.path", "line_number": 894, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 894, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 896, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 896, "usage_type": "call"}, {"api_name": "os.path", "line_number": 896, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 898, "usage_type": "call"}, {"api_name": "os.path", "line_number": 898, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 898, "usage_type": "call"}, {"api_name": "k2plr.config.KPLR_ROOT", "line_number": 898, "usage_type": "argument"}, {"api_name": "shutil.rmtree", "line_number": 899, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 899, "usage_type": "call"}, {"api_name": "k2plr.config.KPLR_ROOT", "line_number": 899, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 899, "usage_type": "attribute"}]} +{"seq_id": "101088331", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport csv\nimport os\nfrom 
sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nimport random\nfrom random import randrange\nfrom collections import Counter\nimport seaborn as sns\nimport operator\n\n\n# In[2]:\n\n\nos.chdir('/home/arslan/Desktop/Machine_Learning_Home Work_2/')\n\n\n# In[3]:\n\n\n# function for reading the images\n# arguments: path to the traffic sign data, for example './GTSRB/Training'\n# returns: list of images, list of corresponding labels \ndef readTrafficSigns_train(rootpath):\n '''Reads traffic sign data for German Traffic Sign Recognition Benchmark.\n\n Arguments: path to the traffic sign data, for example './GTSRB/Training'\n Returns: list of images, list of corresponding labels'''\n images = [] # images\n labels = [] # corresponding labels\n # loop over all 43 classes\n for c in range(0,43):\n prefix = rootpath + '/' + format(c, '05d') + '/' # subdirectory for class\n gtFile = open(prefix + 'GT-'+ format(c, '05d') + '.csv') # annotations file\n gtReader = csv.reader(gtFile, delimiter=';') # csv parser for annotations file\n next(gtReader) # skip header\n # loop over all images in current annotations file\n for row in gtReader:\n images.append(plt.imread(prefix + row[0])) # the 1st column is the filename\n labels.append(int(row[7])) # the 8th column is the label\n gtFile.close()\n return images, labels\n\n\n# In[4]:\n\n\n# function for reading the images\n# arguments: path to the traffic sign data, for example './GTSRB/Training'\n# returns: list of images, list of corresponding labels \ndef readTrafficSigns_test(rootpath):\n '''Reads traffic sign data for German Traffic Sign Recognition Benchmark.\n\n Arguments: path to the traffic sign data, for example './GTSRB/Training'\n Returns: list of images, list of corresponding labels'''\n images, labels = list(), list() # images and labels\n # read the single test set annotations file\n prefix = rootpath\n gtFile = open(prefix + '/' + 'GT-final_test.csv')\n gtReader = csv.reader(gtFile, delimiter=';') # csv parser for annotations file\n next(gtReader) # skip header\n # loop over all images in current annotations file\n for row in gtReader:\n images.append(plt.imread(prefix + '/' + row[0])) # the 1st column is the filename\n labels.append(int(row[7])) # the 8th column is the label\n gtFile.close()\n return images, labels\n\n\n# In[5]:\n\n\nimages, labels = readTrafficSigns_train('./GTSRB/Final_Training/Images')\nx_test, y_test = readTrafficSigns_test('./GTSRB/Final_Test/Images')\n\n\n# In[6]:\n\n\ndef transform(data):\n imgs = []\n for img in data:\n diff = abs(img.shape[0] - img.shape[1])\n i = cv2.copyMakeBorder(img, diff, diff, diff, diff, cv2.BORDER_CONSTANT)\n i = cv2.resize(i, (30, 30))\n \n i = np.asarray(i)\n i.reshape(30,30,3)\n# print(i.shape)\n# cv2.imwrite(\"img.png\",i)\n# cv2.imshow(\"image\",i)\n# cv2.waitKey(0)\n \n imgs.append(i)\n \n return imgs\n\n\nimages = transform(images)\nx_test = transform(x_test)\nprint(len(images))\nprint(len(x_test))\n\n\n# In[7]:\n\n\nimport random\n\ndef apply_brightness_contrast(input_img, brightness = 0, contrast = 0):\n\n if brightness != 0:\n if brightness > 0:\n shadow = brightness\n highlight = 255\n else:\n shadow = 0\n highlight = 255 + brightness\n alpha_b = (highlight - shadow)/255\n gamma_b = shadow\n\n buf = cv2.addWeighted(input_img, alpha_b, input_img, 0, gamma_b)\n else:\n buf = input_img.copy()\n\n if contrast != 0:\n f = 131*(contrast + 127)/(127*(131-contrast))\n alpha_c = f\n gamma_c = 127*(1-f)\n\n buf = cv2.addWeighted(buf, alpha_c, buf, 0, 
gamma_c)\n\n return buf\n\ndef data_augment(image):\n \n r = random.randint(0,40)\n img_local = apply_brightness_contrast(image, r, r)\n return img_local\n\n\n# In[8]:\n\n\nfrom math import ceil\n\ndef split_train_test(x_train, y_train):\n \n size_test = (1 - 0.8)\n size_test = ceil(size_test * len(x_train))\n x_remaining = x_train[0:len(x_train)]\n y_remaining = y_train[0:len(x_train)]\n new_x, new_y = [], []\n while len(new_x) < size_test:\n lst_x = list()\n lst_y = list()\n index = randrange(30, len(x_remaining), 30)\n for i in range(index - 1, (index - 30) - 1, -1):\n lst_x.append(x_remaining.pop(i))\n lst_y.append(y_remaining.pop(i))\n new_x.extend(lst_x)\n new_y.extend(lst_y)\n return new_x, new_y, x_remaining, y_remaining\n\nx_val, y_val, x_train, y_train = split_train_test(images, labels)\n\n\n# In[9]:\n\n\nprint(len(x_train), len(x_val), len(y_train), len(y_val))\n\n\n# In[10]:\n\n\nfrom collections import Counter\nimport pandas\n\ndef bar_chart(y_train):\n \n count = Counter(y_train)\n df = pandas.DataFrame.from_dict(count, orient='index')\n df.plot(kind='bar', title='frequency', figsize=(15,10), fontsize=12)\n plt.show()\n\n\n# In[11]:\n\n\nbar_chart(y_train)\n\n\n# In[12]:\n\n\nimport numpy as np\nx_train = np.asarray(x_train)\ny_train = np.asarray(y_train)\n\nfor i in range(0,43):\n \n class_records = np.where(y_train==i)[0].size\n max_records = 2000\n if class_records != max_records:\n \n ovr_sample = max_records - class_records\n samples = x_train[np.where(y_train==i)[0]]\n X_aug = []\n Y_aug = [i] * ovr_sample\n \n for x in range(ovr_sample):\n \n img = samples[x % class_records]\n trans_img = data_augment(img)\n X_aug.append(trans_img)\n \n x_train = np.concatenate((x_train, X_aug), axis=0)\n y_train = np.concatenate((y_train, Y_aug)) \n\nbar_chart(y_train)\n\n\n# In[13]:\n\n\ndef normalize(data):\n return np.array((data - np.min(data)) / (np.max(data) - np.min(data)))\n\n\n# In[14]:\n\n\ndef ravelling(data):\n fin = []\n for x in data:\n y = np.asarray(list(x.ravel()))\n fin.append(y)\n return fin\n\n\n# In[15]:\n\n\nx_train = normalize(x_train)\n\n\n# In[16]:\n\n\nx_test = normalize(x_test)\n\n\n# In[17]:\n\n\nx_val = normalize(x_val)\n\n\n# In[18]:\n\n\nx_train = ravelling(x_train)\n\n\n# In[19]:\n\n\nx_test = ravelling(x_test)\n\n\n# In[20]:\n\n\nx_val = ravelling(x_val)\n\n\n# In[21]:\n\n\nprint(np.asarray(x_train).shape, len(y_train))\n\n\n# In[22]:\n\n\nmodel = RandomForestClassifier(n_estimators = 30)\nmodel.fit(x_train, y_train)\nval_a = model.score(x_val, y_val)\ntest_a = model.score(x_test, y_test)\nprint('Validation data accuracy is.. ' + str(val_a))\nprint('Test data accuracy is.. 
' + str(test_a))\n\n\n# In[23]:\n\n\nimport sklearn\ny_pred_test = model.predict(x_test)\nreport = sklearn.metrics.classification_report(y_true= y_test, y_pred= y_pred_test)\nprint(report)\n\n", "sub_path": "ML_HW_2_(2) (3).py", "file_name": "ML_HW_2_(2) (3).py", "file_ext": "py", "file_size_in_byte": 6911, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.chdir", "line_number": 25, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "cv2.copyMakeBorder", "line_number": 94, "usage_type": "call"}, {"api_name": "cv2.BORDER_CONSTANT", "line_number": 94, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 97, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 132, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 141, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 147, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 160, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 167, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 192, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 193, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 193, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 291, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 297, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 310, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 310, "usage_type": "attribute"}]} +{"seq_id": "110931330", "text": "\"\"\"Processing of OpenStreetMap data.\n\nThe module provides functions to work with .osm.pbf files.\n`Osmium `_ is required for most of them.\n\"\"\"\n\nimport os\nfrom subprocess import run, PIPE, DEVNULL\nimport logging\nimport tempfile\nimport functools\n\nimport numpy as np\nimport rasterio\nfrom rasterio.features import rasterize\nimport geopandas as gpd\n\nfrom geohealthaccess.preprocessing import 
default_compression\nfrom geohealthaccess.errors import OsmiumNotFoundError, MissingDataError\nfrom geohealthaccess.utils import human_readable_size\n\nlog = logging.getLogger(__name__)\n\n\ndef requires_osmium(func):\n \"\"\"Check that osmium is available on the system.\"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n run([\"osmium\"], stdout=DEVNULL, stderr=DEVNULL)\n except FileNotFoundError:\n raise OsmiumNotFoundError(\"Osmium not found.\")\n return func(*args, **kwargs)\n\n return wrapper\n\n\n@requires_osmium\ndef tags_filter(osm_pbf, dst_fname, expression, overwrite=True):\n \"\"\"Extract OSM objects based on their tags.\n\n The function reads an input .osm.pbf file and uses `osmium tags-filter`\n to extract the relevant objects into an output .osm.pbf file.\n\n Parameters\n ----------\n osm_pbf : str\n Path to input .osm.pbf file.\n dst_fname : str\n Path to output .osm.pbf file.\n expression : str\n Osmium tags-filter expression. See `osmium tags-filter` manpage for details.\n overwrite : bool, optional\n Overwrite existing file.\n\n Returns\n -------\n dst_fname : str\n Path to output .osm.pbf file.\n \"\"\"\n expression_parts = expression.split(\" \")\n command = [\"osmium\", \"tags-filter\", osm_pbf]\n command += expression_parts\n command += [\"-o\", dst_fname]\n if overwrite:\n command += [\"--overwrite\"]\n log.info(f\"Running command: {' '.join(command)}\")\n run(command, check=True, stdout=DEVNULL, stderr=DEVNULL)\n src_size = human_readable_size(os.path.getsize(osm_pbf))\n dst_size = human_readable_size(os.path.getsize(dst_fname))\n log.info(\n f\"Extracted {os.path.basename(dst_fname)} ({dst_size}) \"\n f\"from {os.path.basename(osm_pbf)} ({src_size}).\"\n )\n return dst_fname\n\n\n@requires_osmium\ndef to_geojson(osm_pbf, dst_fname, overwrite=True):\n \"\"\"Convert an input .osm.pbf file to a GeoJSON file.\n\n Parameters\n ----------\n osm_pbf : str\n Path to input .osm.pbf file.\n dst_fname : str\n Path to output .osm.pbf file.\n overwrite : bool, optional\n Overwrite existing file.\n\n Returns\n -------\n dst_fname : str\n Path to output GeoJSON file.\n \"\"\"\n command = [\"osmium\", \"export\", osm_pbf, \"-o\", dst_fname]\n if overwrite:\n command += [\"--overwrite\"]\n log.info(f\"Running command: {' '.join(command)}\")\n run(command, check=True, stdout=DEVNULL, stderr=DEVNULL)\n src_size = human_readable_size(os.path.getsize(osm_pbf))\n dst_size = human_readable_size(os.path.getsize(dst_fname))\n log.info(\n f\"Created {os.path.basename(dst_fname)} ({dst_size}) \"\n f\"from {os.path.basename(osm_pbf)} ({src_size}).\"\n )\n return dst_fname\n\n\n# Osmium tags-filter expression and properties of interest for each supported\n# thematic extract.\nEXTRACTS = {\n \"roads\": {\n \"expression\": \"w/highway\",\n \"properties\": [\"highway\", \"smoothness\", \"surface\", \"tracktype\"],\n \"geom_types\": [\"LineString\"],\n },\n \"water\": {\n \"expression\": \"nwr/natural=water nwr/waterway nwr/water\",\n \"properties\": [\"waterway\", \"natural\", \"water\", \"wetland\", \"boat\"],\n \"geom_types\": [\"LineString\", \"Polygon\", \"MultiPolygon\"],\n },\n \"health\": {\n \"expression\": \"nwr/amenity=clinic,doctors,hospital,pharmacy nwr/healthcare\",\n \"properties\": [\"amenity\", \"name\", \"healthcare\", \"dispensing\", \"description\"],\n \"geom_types\": [\"Point\"],\n },\n \"ferry\": {\n \"expression\": \"w/route=ferry\",\n \"properties\": [\n \"route\",\n \"duration\",\n \"motor_vehicle\",\n \"motorcar\",\n \"motorcycle\",\n \"bicycle\",\n 
\"foot\",\n ],\n \"geom_types\": [\"LineString\"],\n },\n}\n\n\ndef _centroid(geom):\n \"\"\"Get centroid if possible.\"\"\"\n if geom.geom_type in (\"Polygon\", \"MultiPolygon\"):\n return geom.centroid\n return geom\n\n\ndef _filter_columns(geodataframe, valid_columns):\n \"\"\"Filter columns of a given geodataframe.\"\"\"\n n_removed = 0\n for column in geodataframe.columns:\n if column not in valid_columns and column != \"geometry\":\n geodataframe = geodataframe.drop([column], axis=1)\n n_removed += 1\n log.info(f\"Removed {n_removed} columns. {len(geodataframe.columns)} remaining.\")\n return geodataframe\n\n\ndef _count_objects(osm_pbf):\n \"\"\"Count objects of each type in an .osm.pbf file.\"\"\"\n p = run([\"osmium\", \"fileinfo\", \"-e\", osm_pbf], stdout=PIPE, stderr=DEVNULL)\n fileinfo = p.stdout.decode()\n n_objects = {\"nodes\": 0, \"ways\": 0, \"relations\": 0}\n for line in fileinfo.split(\"\\n\"):\n for obj in n_objects:\n if f\"Number of {obj}\" in line:\n n_objects[obj] = int(line.split(\":\")[-1])\n return n_objects\n\n\ndef _is_empty(osm_pbf):\n \"\"\"Check if a given .osm.pbf is empty.\"\"\"\n count = _count_objects(osm_pbf)\n n_objects = sum((n for n in count.values()))\n return not bool(n_objects)\n\n\ndef thematic_extract(osm_pbf, theme, dst_fname):\n \"\"\"Extract a category of objects from an .osm.pbf file into a GeoPackage.\n\n Parameters\n ----------\n osm_pbf : str\n Path to input .osm.pbf file.\n theme : str\n Category of objects to extract (roads, water, health or ferry).\n dst_fname : str\n Path to output GeoPackage.\n\n Returns\n -------\n dst_fname : str\n Path to output GeoPackage.\n\n Raises\n ------\n MissingData\n If the input .osm.pbf file does not contain any feature related to\n the selected theme.\n \"\"\"\n if theme not in EXTRACTS:\n raise ValueError(\n f\"Theme `{theme}` is not supported. 
Please choose one of the following \"\n f\"options: {', '.join(EXTRACTS.keys())}.\"\n )\n expression = EXTRACTS[theme.lower()][\"expression\"]\n properties = EXTRACTS[theme.lower()][\"properties\"] + [\"geometry\"]\n geom_types = EXTRACTS[theme.lower()][\"geom_types\"]\n log.info(f\"Starting thematic extraction of {theme} objects...\")\n\n with tempfile.TemporaryDirectory(prefix=\"geohealthaccess_\") as tmpdir:\n\n # Filter input .osm.pbf file and export to GeoJSON with osmium-tools\n filtered = tags_filter(\n osm_pbf, os.path.join(tmpdir, \"filtered.osm.pbf\"), expression\n )\n\n # Abort if .osm.pbf is empty\n if _is_empty(filtered):\n raise MissingDataError(\n f\"No {theme} features in {os.path.basename(osm_pbf)}.\"\n )\n\n # An intermediary GeoJSON file so that data can be loaded with GeoPandas\n intermediary = to_geojson(\n filtered, os.path.join(tmpdir, \"intermediary.geojson\")\n )\n\n # Drop useless columns\n geodf = gpd.read_file(intermediary)\n log.info(f\"Loaded OSM data into a GeoDataFrame with {len(geodf)} records.\")\n geodf = _filter_columns(geodf, properties)\n\n # Convert Polygon or MultiPolygon features to Point\n if theme == \"health\":\n geodf[\"geometry\"] = geodf.geometry.apply(_centroid)\n log.info(\"Converted Polygon and MultiPolygon to Point features.\")\n\n # Drop geometries of incorrect types\n geodf = geodf[np.isin(geodf.geom_type, geom_types)]\n log.info(f\"Removed objects with invalid geom types ({len(geodf)} remaining).\")\n\n # Reset index, set CRS and save to output file\n geodf = geodf.reset_index(drop=True)\n if not geodf.crs:\n geodf.crs = {\"init\": \"epsg:4326\"}\n geodf.to_file(dst_fname, driver=\"GPKG\")\n dst_size = human_readable_size(os.path.getsize(dst_fname))\n log.info(\n f\"Saved thematric extract into {os.path.basename(dst_fname)} \"\n f\"({dst_size}).\"\n )\n\n return dst_fname\n\n\ndef create_water_raster(\n osm_water,\n dst_file,\n dst_crs,\n dst_shape,\n dst_transform,\n include_streams=False,\n geom=None,\n overwrite=False,\n):\n \"\"\"Create a raster of surface water from OSM data.\n\n Parameters\n ----------\n osm_water : str\n Path to input OSM features (.gpkg or .geojson).\n dst_file : str\n Path to output raster.\n dst_crs : CRS\n Target coordinate reference system as a rasterio CRS object.\n dst_shape : tuple of int\n Output raster shape (height, width).\n dst_transform : Affine\n Output raster transform.\n include_streams : bool, optional\n Include smallest rivers and streams.\n overwrite : bool, optional\n Overwrite existing files.\n \"\"\"\n log.info(\"Starting rasterization of OSM water objects.\")\n if os.path.isfile(dst_file) and not overwrite:\n log.info(f\"`{os.path.basename(dst_file)}` already exists. 
Skipping.\")\n return\n if not os.path.isfile(osm_water):\n raise MissingDataError(\"OSM water data is missing.\")\n\n water = gpd.read_file(osm_water)\n if water.crs != dst_crs:\n water = water.to_crs(dst_crs)\n water_bodies = water[water.water.isin((\"lake\", \"basin\", \"reservoir\", \"lagoon\"))]\n large_rivers = water[(water.water == \"river\") | (water.waterway == \"riverbank\")]\n small_rivers = water[water.waterway.isin((\"river\", \"canal\"))]\n streams = water[water.waterway == \"stream\"]\n features = [water_bodies, large_rivers, small_rivers]\n if include_streams:\n features.append(streams)\n\n geoms = []\n for objects in features:\n geoms += [g.__geo_interface__ for g in objects.geometry]\n log.info(f\"Found {len(geoms)} OSM water objects.\")\n\n rst = rasterize(\n geoms,\n out_shape=dst_shape,\n fill=0,\n default_value=1,\n transform=dst_transform,\n all_touched=True,\n dtype=\"uint8\",\n )\n\n dst_profile = rasterio.default_gtiff_profile\n dst_profile.update(\n count=1,\n dtype=\"uint8\",\n transform=dst_transform,\n crs=dst_crs,\n height=dst_shape[0],\n width=dst_shape[1],\n nodata=255,\n **default_compression(\"uint8\"),\n )\n\n with rasterio.open(dst_file, \"w\", **dst_profile) as dst:\n dst.write(rst, 1)\n log.info(f\"OSM water raster saved as `{os.path.basename(dst_file)}`.\")\n", "sub_path": "geohealthaccess/osm.py", "file_name": "osm.py", "file_ext": "py", "file_size_in_byte": 10623, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 31, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 31, "usage_type": "name"}, {"api_name": "geohealthaccess.errors.OsmiumNotFoundError", "line_number": 33, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 28, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 69, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 69, "usage_type": "name"}, {"api_name": "geohealthaccess.utils.human_readable_size", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "geohealthaccess.utils.human_readable_size", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 101, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 101, "usage_type": "name"}, {"api_name": "geohealthaccess.utils.human_readable_size", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "geohealthaccess.utils.human_readable_size", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path.basename", 
"line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 165, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 165, "usage_type": "name"}, {"api_name": "subprocess.DEVNULL", "line_number": 165, "usage_type": "name"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path", "line_number": 219, "usage_type": "attribute"}, {"api_name": "geohealthaccess.errors.MissingDataError", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path", "line_number": 230, "usage_type": "attribute"}, {"api_name": "geopandas.read_file", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 244, "usage_type": "call"}, {"api_name": "geohealthaccess.utils.human_readable_size", "line_number": 252, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 252, "usage_type": "call"}, {"api_name": "os.path", "line_number": 252, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path", "line_number": 254, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 291, "usage_type": "call"}, {"api_name": "os.path", "line_number": 291, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path", "line_number": 292, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 294, "usage_type": "call"}, {"api_name": "os.path", "line_number": 294, "usage_type": "attribute"}, {"api_name": "geohealthaccess.errors.MissingDataError", "line_number": 295, "usage_type": "call"}, {"api_name": "geopandas.read_file", "line_number": 297, "usage_type": "call"}, {"api_name": "rasterio.features.rasterize", "line_number": 313, "usage_type": "call"}, {"api_name": "rasterio.default_gtiff_profile", "line_number": 323, "usage_type": "attribute"}, {"api_name": "geohealthaccess.preprocessing.default_compression", "line_number": 332, "usage_type": "call"}, {"api_name": "rasterio.open", "line_number": 335, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 337, "usage_type": "call"}, {"api_name": "os.path", "line_number": 337, "usage_type": "attribute"}]} +{"seq_id": "580719781", "text": "from django.db.models.aggregates import Avg\nfrom django.http.response import Http404\nfrom django.shortcuts import get_object_or_404, render\nfrom django.db.models import Avg\nfrom .models import *\n# Create your views here.\ndef index(request):\n books = Book.objects.all().order_by(\"-rating\")\n num_books = books.count()\n avg_rating = books.aggregate(Avg(\"rating\"))\n context={\n \"books\":books,\n \"total_number_of_book\":num_books,\n \"average_rating\":avg_rating\n }\n return render(request,'book_outlet/index.html',context)\n\ndef book_detail(request,slug):\n #try:\n # book = Book.objects.get(pk=id)\n #except:\n # raise Http404()\n book = get_object_or_404(Book,slug=slug)\n context={\n \"title\":book.title,\n 
\"author\":book.author,\n \"rating\":book.rating,\n \"is_bestseller\":book.is_bestselling,\n }\n return render(request,'book_outlet/book_detail.html',context)", "sub_path": "book_outlet/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 934, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.db.models.Avg", "line_number": 10, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "407704879", "text": "\n'''**************************************************************************\n*****************************************************************************\n****** U_NET: A Deep Learning Model to Segment Breast Cancerous Cells *******\n ...... \n***************************** Author: FARADARS ******************************\n ...... \n*****************************************************************************\n**************************************************************************'''\n\n\n# 1. Import Required Modules\n\nimport os\nimport glob\nimport keras\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom keras.layers import *\nimport keras.backend as k\nfrom keras.models import *\nfrom keras.optimizers import *\nimport matplotlib.pyplot as plt\nfrom skimage.transform import resize\nfrom skimage.io import imread, imshow, imsave\nfrom keras.losses import categorical_crossentropy\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping\n\n\n\n# 2. Define Train & Test Path (Images + Mask Path for Train and Test Stages)\n\nTRAIN_IMAGE_PATH = '/home/faradars/Desktop/Samples/Inputs_Train'\nTRAIN_MASK_PATH = '/home/faradars/Desktop/Samples/Masks_Train'\nTEST_IMAGE_PATH = '/home/faradars/Desktop/Samples/Inputs_Test'\nTEST_MASK_PATH = '/home/faradars/Desktop/Samples/Masks_Test'\n\n\n\n# 3. Initialize Images and Mask Size\n\nIMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS = 64, 64, 3\n\n\n\n# 4. 
Define Pre_Processing Function (Region of Interest Extraction _ ROI)\n \nTrain_Mask_List = sorted(next(os.walk(TRAIN_MASK_PATH))[2])\nTest_Mask_List = sorted(next(os.walk(TEST_MASK_PATH))[2])\n\ndef Data_Proprocessing_Train():\n \n Init_Image = np.zeros((len(Train_Mask_List), 768, 896, 3), dtype = np.uint8)\n Init_Mask = np.zeros((len(Train_Mask_List), 768, 896), dtype = np.bool)\n Train_X = np.zeros((len(Train_Mask_List), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype = np.uint8)\n Train_Y = np.zeros((len(Train_Mask_List), IMG_HEIGHT, IMG_WIDTH, 1), dtype = np.bool)\n \n n = 0\n \n for mask_path in glob.glob('{}/*.TIF'.format(TRAIN_MASK_PATH)):\n \n base = os.path.basename(mask_path)\n image_ID, ext = os.path.splitext(base)\n image_path = '{}/{}_ccd.tif'.format(TRAIN_IMAGE_PATH, image_ID)\n mask = imread(mask_path)\n image = imread(image_path)\n \n y_coord, x_coord = np.where(mask == 255)\n \n y_min = min(y_coord) \n y_max = max(y_coord)\n x_min = min(x_coord)\n x_max = max(x_coord)\n \n cropped_image = image[y_min:y_max, x_min:x_max]\n cropped_mask = mask[y_min:y_max, x_min:x_max]\n \n Train_X[n] = resize(cropped_image[:,:,:IMG_CHANNELS],\n (IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS),\n mode = 'constant',\n anti_aliasing=True,\n preserve_range=True)\n \n Train_Y[n] = np.expand_dims(resize(cropped_mask, \n (IMG_HEIGHT, IMG_WIDTH),\n mode = 'constant',\n anti_aliasing=True,\n preserve_range=True), axis = -1)\n \n Init_Image[n] = image\n Init_Mask[n] = mask\n \n n+=1\n \n return Train_X, Train_Y, Init_Image, Init_Mask\n\nTrain_Inputs, Train_Masks, Init_Image, Init_Mask = Data_Proprocessing_Train()\n\n\ndef Data_Proprocessing_Test():\n \n\n Test_X = np.zeros((len(Test_Mask_List), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype = np.uint8)\n Test_Y = np.zeros((len(Test_Mask_List), IMG_HEIGHT, IMG_WIDTH, 1), dtype = np.bool)\n \n n = 0\n \n for mask_path in glob.glob('{}/*.TIF'.format(TEST_MASK_PATH)):\n \n base = os.path.basename(mask_path)\n image_ID, ext = os.path.splitext(base)\n image_path = '{}/{}_ccd.tif'.format(TEST_IMAGE_PATH, image_ID)\n mask = imread(mask_path)\n image = imread(image_path)\n \n y_coord, x_coord = np.where(mask == 255)\n \n y_min = min(y_coord) \n y_max = max(y_coord)\n x_min = min(x_coord)\n x_max = max(x_coord)\n \n cropped_image = image[y_min:y_max, x_min:x_max]\n cropped_mask = mask[y_min:y_max, x_min:x_max]\n \n Test_X[n] = resize(cropped_image[:,:,:IMG_CHANNELS],\n (IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS),\n mode = 'constant',\n anti_aliasing=True,\n preserve_range=True)\n \n Test_Y[n] = np.expand_dims(resize(cropped_mask, \n (IMG_HEIGHT, IMG_WIDTH),\n mode = 'constant',\n anti_aliasing=True,\n preserve_range=True), axis = -1)\n \n \n n+=1\n \n return Test_X, Test_Y\n\nTest_Inputs, Test_Masks = Data_Proprocessing_Test()\n\n \n # 4.1. Show The Results in Preprocessing Stage\n \nprint('Original_Image')\nimshow(Init_Image[0])\nplt.show()\n\nprint('Original_Mask')\nimshow(Init_Mask[0])\nplt.show()\n\nprint('Region_of_Interest_Image')\nimshow(Train_Inputs[0])\nplt.show()\n\nprint('Region_of_Interest_Mask')\nimshow(np.squeeze(Train_Masks[0]))\nplt.show()\n\nrows = 1\ncolumns = 4\nFigure = plt.figure(figsize=(15,15))\nImage_List = [Init_Image[0], Init_Mask[0], Train_Inputs[0], Train_Masks[0]]\n\nfor i in range(1, rows*columns + 1):\n Image = Image_List[i-1]\n Sub_Plot_Image = Figure.add_subplot(rows, columns, i)\n Sub_Plot_Image.imshow(np.squeeze(Image))\nplt.show()\n\n\n# 5. 
Implementation of U_NET Model for Semantic Segmentation\n\ndef U_Net_Segmentation(input_size=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)):\n \n inputs = Input(input_size)\n n = Lambda(lambda x:x/255)(inputs)\n \n \n c1 = Conv2D(16, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(n)\n c1 = Dropout(0.1)(c1)\n c1 = Conv2D(16, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(c1)\n p1 = MaxPooling2D((2,2))(c1)\n\n\n c2 = Conv2D(32, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(p1)\n c2 = Dropout(0.1)(c2)\n c2 = Conv2D(32, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(c2)\n p2 = MaxPooling2D((2,2))(c2)\n\n\n c3 = Conv2D(64, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(p2)\n c3 = Dropout(0.2)(c3)\n c3 = Conv2D(64, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(c3)\n p3 = MaxPooling2D((2,2))(c3)\n\n\n c4 = Conv2D(128, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(p3)\n c4 = Dropout(0.2)(c4)\n c4 = Conv2D(128, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(c4)\n p4 = MaxPooling2D((2,2))(c4)\n\n\n c5 = Conv2D(256, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(p4)\n c5 = Dropout(0.3)(c5)\n c5 = Conv2D(256, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(c5)\n\n\n\n u6 = Conv2DTranspose(128, (2,2), strides=(2,2), padding='same')(c5)\n u6 = concatenate([u6, c4])\n c6 = Conv2D(128, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(u6)\n c6 = Dropout(0.2)(c6)\n c6 = Conv2D(128, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(c6) \n\n\n u7 = Conv2DTranspose(64, (2,2), strides=(2,2), padding='same')(c6)\n u7 = concatenate([u7, c3])\n c7 = Conv2D(64, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(u7)\n c7 = Dropout(0.2)(c7)\n c7 = Conv2D(64, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(c7) \n\n u8 = Conv2DTranspose(32, (2,2), strides=(2,2), padding='same')(c7)\n u8 = concatenate([u8, c2])\n c8 = Conv2D(32, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(u8)\n c8 = Dropout(0.1)(c8)\n c8 = Conv2D(32, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(c8) \n \n \n u9 = Conv2DTranspose(16, (2,2), strides=(2,2), padding='same')(c8)\n u9 = concatenate([u9, c1], axis = 3)\n c9 = Conv2D(16, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(u9)\n c9 = Dropout(0.1)(c9)\n c9 = Conv2D(16, (3,3), activation='elu', kernel_initializer='he_normal',\n padding='same')(c9) \n \n outputs = Conv2D(1,(1,1), activation='sigmoid')(c9)\n \n model = Model(inputs=[inputs], outputs=[outputs])\n model.compile(optimizer='adam', loss='binary_crossentropy', \n metrics=[Mean_IOU_Evaluator])\n model.summary()\n return model\n \n \n# 6. Define U_NET Model Evaluator (Intersection Over Union _ IOU)\n\ndef Mean_IOU_Evaluator(y_true, y_pred):\n \n prec = []\n \n for t in np.arange(0.5, 1, 0.05):\n \n y_pred_ = tf.to_int32(y_pred>t)\n score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)\n k.get_session().run(tf.local_variables_initializer())\n with tf.control_dependencies([up_opt]):\n score = tf.identity(score)\n prec.append(score)\n return k.mean(k.stack(prec), axis = 0)\n\nmodel = U_Net_Segmentation()\n\n \n# 7. 
Show The Results per Epoch\n\nclass loss_history(keras.callbacks.Callback):\n \n def __init__ (self, x=4):\n self.x = x\n \n def on_epoch_begin(self, epoch, logs={}):\n \n imshow(Train_Inputs[self.x])\n plt.show()\n \n imshow(np.squeeze(Train_Masks[self.x]))\n plt.show()\n \n preds_train = self.model.predict(np.expand_dims(Train_Inputs[self.x], axis = 0))\n imshow(np.squeeze(preds_train[0]))\n plt.show()\n \n\nimageset = 'BCC'\nbackbone = 'UNET'\nversion = 'v1.0'\nmodel_h5 = 'model-{imageset}-{backbone}-{version}.h5'.format(imageset=imageset, \n backbone = backbone, version = version)\nmodel_h5_checkpoint = '{model_h5}.checkpoint'.format(model_h5=model_h5)\n\nearlystopper = EarlyStopping(patience=7, verbose=1)\ncheckpointer = ModelCheckpoint(model_h5_checkpoint, verbose = 1, save_best_only=True)\n\n \n \n# 8. Train U_NET Model using Training Samples\n\nresults = model.fit(Train_Inputs, Train_Masks, \n validation_split=0.1, \n batch_size=2,\n epochs=50,\n callbacks=[earlystopper, checkpointer, loss_history()])\n \n# 9. U_NET Model Evaluation using Test Samples\n\npreds_train = model.predict(Train_Inputs, verbose=1)\npreds_train_t = (preds_train>0.5).astype(np.uint8)\npreds_test = model.predict(Test_Inputs, verbose=1)\npreds_test_t = (preds_test>0.5).astype(np.uint8)\n \n# 10. Show Final Results (Segmented Images)\n\nix = random.randint(0, len(Train_Inputs)-1)\n\nprint(ix)\n\nprint('Train_Image')\nimshow(Train_Inputs[ix])\nplt.show()\n\nprint('Train_Mask')\nimshow(np.squeeze(Train_Masks[ix]))\nplt.show()\n\nprint('Segmented_Image')\nimshow(np.squeeze(preds_train[ix]))\nplt.show()\n\n\niix = random.randint(0,1)\nprint(iix)\n\nprint('Test_Image')\nimshow(Test_Inputs[iix])\nplt.show()\n\nprint('Test_Mask')\nimshow(np.squeeze(Test_Masks[iix]))\nplt.show()\n\nprint('Segmented_Test_Mask')\nimshow(np.squeeze(preds_test[iix]))\nplt.show()\n\n\n# 11. Show Loss and IOU Plots\n\n\n# 11.1. Summarize History for Loss\n\nplt.plot(results.history['loss'])\nplt.plot(results.history['val_loss'])\nplt.title('Model Loss')\nplt.ylabel('loss')\nplt.xlabel('epochs')\nplt.legend(['Training','Validation'], loc = 'upper left')\nplt.show()\n\n\n# 11.1. 
Summarize History for IOU\n\nplt.plot(results.history['Mean_IOU_Evaluator'])\nplt.plot(results.history['val_Mean_IOU_Evaluator'])\nplt.title('Intersection Over Union')\nplt.ylabel('IOU')\nplt.xlabel('epochs')\nplt.legend(['Training','Validation'], loc = 'upper left')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "UNET.py", "file_name": "UNET.py", "file_ext": "py", "file_size_in_byte": 11895, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.walk", "line_number": 49, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 57, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "skimage.io.imread", "line_number": 66, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 69, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 85, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 105, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "skimage.io.imread", "line_number": 114, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 117, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 133, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 133, "usage_type": "call"}, {"api_name": "skimage.io.imshow", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "skimage.io.imshow", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", 
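Note on the U-Net entry above: its Mean_IOU_Evaluator relies on TF1-era APIs (tf.to_int32, tf.metrics.mean_iou, K.get_session) that no longer exist in TensorFlow 2. As a framework-neutral reference, here is a plain-NumPy sketch of the same thresholded mean-IoU idea; it is an illustration of the metric, not the entry's actual code.

import numpy as np

def mean_iou(y_true, y_pred, thresholds=np.arange(0.5, 1.0, 0.05)):
    # Average binary IoU of (y_pred > t) against y_true over the thresholds,
    # matching the 0.5..0.95 sweep used in the entry above.
    y_true = y_true.astype(bool)
    scores = []
    for t in thresholds:
        pred = y_pred > t
        inter = np.logical_and(y_true, pred).sum()
        union = np.logical_or(y_true, pred).sum()
        scores.append(1.0 if union == 0 else inter / union)
    return float(np.mean(scores))

# Tiny smoke test on a 2x2 mask: a perfect prediction scores 1.0.
gt = np.array([[1, 0], [0, 1]], dtype=bool)
pr = np.array([[0.9, 0.1], [0.2, 0.8]])
assert mean_iou(gt, pr) == 1.0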
"line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "skimage.io.imshow", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "skimage.io.imshow", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "numpy.squeeze", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 274, "usage_type": "call"}, {"api_name": "tensorflow.to_int32", "line_number": 276, "usage_type": "call"}, {"api_name": "tensorflow.metrics.mean_iou", "line_number": 277, "usage_type": "call"}, {"api_name": "tensorflow.metrics", "line_number": 277, "usage_type": "attribute"}, {"api_name": "keras.backend.get_session", "line_number": 278, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 278, "usage_type": "name"}, {"api_name": "tensorflow.local_variables_initializer", "line_number": 278, "usage_type": "call"}, {"api_name": "tensorflow.control_dependencies", "line_number": 279, "usage_type": "call"}, {"api_name": "tensorflow.identity", "line_number": 280, "usage_type": "call"}, {"api_name": "keras.backend.mean", "line_number": 282, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 282, "usage_type": "name"}, {"api_name": "keras.backend.stack", "line_number": 282, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 289, "usage_type": "attribute"}, {"api_name": "skimage.io.imshow", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "skimage.io.imshow", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 300, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 300, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 302, "usage_type": "call"}, {"api_name": "skimage.io.imshow", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 314, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 330, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 332, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 336, "usage_type": "call"}, {"api_name": "skimage.io.imshow", "line_number": 341, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.show", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "skimage.io.imshow", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 346, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 346, "usage_type": "name"}, {"api_name": "skimage.io.imshow", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 349, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 350, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 350, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 353, "usage_type": "call"}, {"api_name": "skimage.io.imshow", "line_number": 357, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 358, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 358, "usage_type": "name"}, {"api_name": "skimage.io.imshow", "line_number": 361, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 361, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 362, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 362, "usage_type": "name"}, {"api_name": "skimage.io.imshow", "line_number": 365, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 365, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 366, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 366, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 374, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 374, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 375, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 375, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 376, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 376, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 377, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 377, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 378, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 378, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 379, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 379, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 380, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 380, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 386, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 386, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 387, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 387, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 388, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 388, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 389, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 389, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 390, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 390, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 391, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 391, "usage_type": "name"}]} +{"seq_id": "502781149", "text": "\"\"\"API Call that gets Articles\"\"\"\nimport os\nfrom os.path import join, dirname\nfrom datetime import date\nfrom datetime import timedelta\nimport requests\nfrom dotenv import load_dotenv\nYESTERDAY = date.today() - timedelta(days = 1)\nSOURCES = ('abc-news,associated-press,bloomberg,'\n 'cbs-news,cnn,fox-news,google-news,'\n 'independent,msnbc,medical-news-today,'\n 'nbc-news,the-hill,usa-today')\ndotenv_path = join(dirname(__file__), \"api-keys.env\")\nload_dotenv(dotenv_path)\ndef get_news(amtArticles, since = YESTERDAY.strftime(\"%yyyy-%mm-%dd\"), query = 'covid'):\n '''\n This function is used to pull articles from NEWS API\n PARAMERTERS:\n amtArticles: the amount of articles you would like to be returned\n since: what is the oldest you would like an article to be\n DEFAULT: yesterday's date\n query: what one keyword would you liek to query for\n DEFUALT: covid\n RETURNS\n an array of Article objects\n '''\n if amtArticles < 1:\n return {'Error':'Amount of articles must be > 0'}\n #ensure that the query is one word\n query = query.split()[0]\n url = ('http://newsapi.org/v2/top-headlines?'\n 'sources='+SOURCES+'&'\n 'q='+query+'&'\n 'from='+since+'&'\n 'sortBy=publishedAt&'\n 'apiKey='+ os.environ['NEWS_API_KEY'])\n response=requests.get(url)\n data = response.json()\n articles = []\n if data['status'] == 'ok':\n if data['totalResults'] < amtArticles:\n amtArticles=data['totalResults']\n for i in range(0,amtArticles):\n art = data['articles'][i]\n source = art['source']['name']\n author = art['author']\n title = art['title']\n desc = art['description']\n link = art['url']\n image = art['urlToImage']\n pubDate = art['publishedAt']\n articles.append(Article(title, author, desc, source, image, pubDate, link))\n return articles\n else:\n return {'Error': 'API call failed, status = ' + data['status'] }\n\nclass Article:\n \"\"\"Class for Articles\"\"\"\n def __init__(self, title, author, description, source, image, publishDate, url):\n self.title = title\n self.author = author\n self.description = description\n self.source = source\n self.image = image\n self.publishDate = publishDate\n self.url = url\n\n'''\n{\n \"status\": \"ok\",\n \"totalResults\": 225,\n -\"articles\": [\n -{\n -\"source\": {\n \"id\": \"axios\",\n \"name\": \"Axios\"\n },\n \"author\": \"Jacob Knutson\",\n \"title\": \"\\\"This is getting insane\\\": Republicans rebuke Trump over baseless election claims\",\n \"description\": \"\\\"STOP Spreading debunked misinformation,\\\" Rep. Adam Kinzinger said.\",\n \"url\": \"https://www.axios.com/trump-republicans-baseless-election-claims-fa686850-efb9-41cd-a25e-1b9cc1b52b85.html\",\n \"urlToImage\": \"https://images.axios.com/OgCs7tNiHvLM8zUDGuXgCOlqgZM=/fit-in/1366x1366/2020/11/06/1604623553559.jpg\",\n \"publishedAt\": \"2020-11-06T01:29:23Z\",\n \"content\": \"A growing list of Republicans have reproached President Trump for his baseless claims of widespread voter fraud.\\r\\nWhy it matters: In televised remarks on Thursday evening. 
the president provided no e… [+1962 chars]\"\n },\n-{\n'''\n", "sub_path": "news.py", "file_name": "news.py", "file_ext": "py", "file_size_in_byte": 3417, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "datetime.date.today", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 8, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 36, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "595554131", "text": "import time\nfrom craigslist import CraigslistHousing\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, DateTime, Float, Boolean\nfrom sqlalchemy.orm import sessionmaker\nfrom dateutil.parser import parse\nimport location_helper\nfrom slackclient import SlackClient\nimport settings\nfrom condition import Condition, LocationCondition\n\n# Database connection\nDBEngine = create_engine('sqlite:///listings.db', echo=False)\nBase = declarative_base()\n\nclass Listing(Base):\n \"\"\"\n A table to store data on craigslist listings.\n \"\"\"\n\n __tablename__ = 'listings'\n\n id = Column(Integer, primary_key=True)\n link = Column(String, unique=True)\n created = Column(DateTime)\n geotag = Column(String)\n lat = Column(Float)\n lon = Column(Float)\n name = Column(String)\n price = Column(Float)\n location = Column(String)\n cl_id = Column(Integer, unique=True)\n area = Column(String)\n commute_time = Column(Float)\n\nBase.metadata.create_all(DBEngine)\n\nSession = sessionmaker(bind=DBEngine)\nsession = Session()\n\nclass Scraper:\n \"\"\"\n A scraper class that will pull data from Craigslist and match with the\n criteria.\n \"\"\"\n def __init__(self, site, category, areas_filters_dict,\n slack_settings):\n \"\"\"\n Initializes and instance of this class.\n :param site: The Craigslist site to search.\n :param category: The category of the housing search.\n :param areas_filters_dict: A dictionary of areas with filters to search.\n \"\"\"\n # Create Craigslist clients for each area\n self.cl_clients = {}\n for area, filters in areas_filters_dict.items():\n self.cl_clients[area] = CraigslistHousing(site=site,\n category=category,\n area=area,\n filters=filters)\n\n # Create a Slack client to post satisfying listings to\n self.slack_client = SlackClient(slack_settings[\"slack_token\"])\n self.slack_channel = slack_settings[\"slack_channel\"]\n\n # Initialize filtering conditions\n self.conditions = []\n\n # Initialize work geocode\n self.work_geocode = location_helper.get_geocode(settings.WORK_ADDRESS)\n\n def add_condition(self, condition):\n \"\"\"\n Adds the filtering condition to weed out listings.\n \"\"\"\n # Data validation\n if condition is None or not isinstance(condition, Condition):\n print(\"Condition is not well defined.\")\n return\n\n # Add the condition\n self.conditions.append(condition)\n\n def scrape(self):\n \"\"\"\n Runs the Craigslist scraper, and posts data to slack.\n \"\"\"\n\n # Get all the results from craigslist.\n all_results = []\n for area in self.cl_clients:\n all_results += self.scrape_area(area)\n\n 
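Note on the news.py entry above: its since default only works with single-letter strftime directives, which is why the original "%yyyy-%mm-%dd" form was fixed to "%Y-%m-%d". A standard-library-only check of the two spellings (assumes nothing beyond datetime):

from datetime import date, timedelta

yesterday = date.today() - timedelta(days=1)

# Correct ISO-style stamp: four-digit year, zero-padded month and day.
print(yesterday.strftime("%Y-%m-%d"))  # e.g. 2020-11-05

# The doubled-letter form expands each directive once and keeps the leftover
# letters as literals, e.g. "%yyyy" becomes the two-digit year plus "yyy".
print(yesterday.strftime("%yyyy-%mm-%dd"))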
print(\"{}: Got {} results\".format(time.ctime(), len(all_results)))\n\n # Post each result to slack.\n for result in all_results:\n self.post_listing_to_slack(result)\n\n def scrape_area(self, area):\n \"\"\"\n Scrapes craigslist for a certain geographic area, and finds\n the latest listings.\n :param area:\n :return: A list of results.\n \"\"\"\n # Get the latest listing on Craigslist\n listings = self.cl_clients[area].get_results(sort_by='newest',\n geotagged=True,\n limit=20)\n results = []\n\n print(\"Browsing through the new listings...\")\n\n # Process new listing and check for criteria\n for listing in listings:\n # Skip the listing if it is already in the database\n existing_listing = session.query(Listing).filter_by(cl_id=listing[\"id\"]).first()\n if existing_listing is not None:\n continue\n\n # Update location and transportation information\n listing = self.update_geographic_information(listing)\n\n # Create and save the listing so we don't grab it again.\n listing_entity = self._create_listing_entity(listing)\n session.add(listing_entity)\n session.commit()\n\n # Check the listing against conditions\n is_good_listing = True\n for condition in self.conditions:\n # Continue if the listing satisfies the current condition\n if condition.check(listing):\n continue\n\n # Otherwise, mark the listing as bad\n is_good_listing = False\n break\n\n # Do nothing if it is a bad listing\n if not is_good_listing:\n continue\n\n # Add the listing to return listings and database\n results.append(listing)\n\n # Return all the good results\n return results\n\n def post_listing_to_slack(self, listing):\n \"\"\"\n Posts the result to Slack channel.\n :param result: The result to post to Slack channel.\n \"\"\"\n # Data validation\n if listing is not None and not listing:\n print(\"Warning: The listing is not well defined.\")\n return\n\n # Commute time\n duration = listing['commute_time']\n minutes = duration // 60\n seconds = duration % 60\n\n # Build the description string to post\n desc = \"{} | {} | {} | {}m{}s | <{}>\".format(\n listing[\"area\"],\n listing[\"price\"],\n listing[\"name\"],\n minutes,\n seconds,\n listing[\"url\"])\n\n print(\"Desc: {}\".format(desc))\n\n # Post to Slack\n self.slack_client.api_call(\"chat.postMessage\",\n channel=self.slack_channel,\n text=desc,\n username='pybot',\n icon_emoji=':robot_face:')\n\n def update_geographic_information(self, listing):\n \"\"\"\n Updates the geographic information such as location, lattitude,\n longitude and transportation time.\n :param listing: The listing from Craigslist.\n :return: The updated listing result.\n \"\"\"\n # Data validation\n if listing is not None and not listing:\n print(\"Warning: The listing is not well defined.\")\n return listing\n \n # Try to get the geocode from geotag if present\n if \"geotag\" in listing and listing[\"geotag\"] is not None:\n listing[\"lat\"] = listing[\"geotag\"][0]\n listing[\"lon\"] = listing[\"geotag\"][1]\n else:\n # Try to deduce the geocode from the locations\n locations = []\n if \"where\" in listing and listing[\"where\"] is not None:\n locations = location_helper.parse_locations(listing[\"where\"])\n\n # Calculate the average geocode of all the locations\n avg_lat = 0\n avg_lon = 0\n count = 0\n for location in locations:\n lat, lon = location_helper.get_geocode(location)\n avg_lat += lat\n avg_lon += lon\n count += 1\n listing[\"lat\"] = avg_lat / count\n listing[\"lon\"] = avg_lon / count\n\n # Calculate the travel time\n src = (listing['lat'], listing['lon'])\n 
listing['commute_time'] = location_helper.get_travel_time(src,\n self.work_geocode)\n\n # Return the updated listing\n return listing\n\n def _create_listing_entity(self, listing):\n \"\"\"\n Creates a listing entity for database from the listing result.\n :param listing: The listing result from Craigslist.\n :return: The Listing entity for database.\n \"\"\"\n # Data validation\n if listing is None:\n print(\"Warning: listing parameter should not be None.\")\n return None\n\n # Try parsing the price\n price = -1\n try:\n price = float(listing[\"price\"].replace(\"$\", \"\"))\n except OverflowError:\n pass\n\n # TODO: Clean up the area and bart station\n listing[\"area\"] = \"Seattle\"\n\n # Create listing entity\n return Listing(\n link=listing[\"url\"],\n created=parse(listing[\"datetime\"]),\n lat=listing[\"lat\"],\n lon=listing[\"lon\"],\n name=listing[\"name\"],\n price=price,\n location=listing[\"where\"],\n cl_id=listing[\"id\"],\n area=listing[\"area\"],\n commute_time=listing[\"commute_time\"]\n )\n", "sub_path": "scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 8874, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 24, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 25, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 26, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 27, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Float", "line_number": 28, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Float", "line_number": 29, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 30, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Float", "line_number": 31, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 32, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 33, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 34, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.Float", "line_number": 35, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 39, "usage_type": "call"}, {"api_name": "craigslist.CraigslistHousing", "line_number": 58, "usage_type": "call"}, {"api_name": "slackclient.SlackClient", "line_number": 64, "usage_type": "call"}, 
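Note on the scraper.py entry above: its dedup hinges on the unique cl_id column plus a query-before-insert in scrape_area. A compact, self-contained sketch of that pattern against an in-memory SQLite database; the Seen model and record_if_new helper are hypothetical stand-ins, not the project's code.

from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Seen(Base):
    # Hypothetical stand-in for the Listing table, keyed by cl_id.
    __tablename__ = "seen"
    id = Column(Integer, primary_key=True)
    cl_id = Column(Integer, unique=True)
    name = Column(String)

engine = create_engine("sqlite://")  # throwaway in-memory database
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

def record_if_new(cl_id, name):
    # Same pattern as scrape_area(): query first, insert only if absent.
    if session.query(Seen).filter_by(cl_id=cl_id).first() is not None:
        return False
    session.add(Seen(cl_id=cl_id, name=name))
    session.commit()
    return True

assert record_if_new(1, "cozy studio") is True
assert record_if_new(1, "cozy studio") is False  # duplicate skipped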
{"api_name": "location_helper.get_geocode", "line_number": 71, "usage_type": "call"}, {"api_name": "settings.WORK_ADDRESS", "line_number": 71, "usage_type": "attribute"}, {"api_name": "condition.Condition", "line_number": 78, "usage_type": "argument"}, {"api_name": "time.ctime", "line_number": 95, "usage_type": "call"}, {"api_name": "condition.check", "line_number": 135, "usage_type": "call"}, {"api_name": "location_helper.parse_locations", "line_number": 205, "usage_type": "call"}, {"api_name": "location_helper.get_geocode", "line_number": 212, "usage_type": "call"}, {"api_name": "location_helper.get_travel_time", "line_number": 221, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 251, "usage_type": "call"}]} +{"seq_id": "562504877", "text": "import argparse\nimport numpy as np\nimport sys\nimport os\nimport os.path as osp\nimport time\nimport random\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils import data\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport utils.joint_transforms as joint_transforms\nimport utils.transforms as extended_transforms\nimport torchvision.transforms as standard_transforms\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom tensorboardX import SummaryWriter\n\nfrom dataset.cityscapes_dataset import CityscapesDataSetLMDB\nfrom dataset.gta5_dataset import GTA5DataSetLMDB\nfrom adv_model import StyleTrans\n\n\ndef main(args):\n writer = SummaryWriter(log_dir=args.tensorboard_log_dir)\n w, h = map(int, args.input_size.split(','))\n\n joint_transform = joint_transforms.Compose([\n joint_transforms.FreeScale((h, w)),\n joint_transforms.RandomHorizontallyFlip(),\n ])\n normalize = ((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n src_input_transform = standard_transforms.Compose([\n standard_transforms.ToTensor(),\n ])\n tgt_input_transform = standard_transforms.Compose([\n standard_transforms.ToTensor(),\n standard_transforms.Normalize(*normalize),\n ])\n val_input_transform = standard_transforms.Compose([\n extended_transforms.FreeScale((h, w)),\n standard_transforms.ToTensor(),\n standard_transforms.Normalize(*normalize),\n ])\n target_transform = extended_transforms.MaskToTensor()\n restore_transform = standard_transforms.ToPILImage()\n\n src_dataset = GTA5DataSetLMDB(\n args.data_dir, args.data_list,\n joint_transform=joint_transform,\n transform=src_input_transform, \n target_transform=target_transform,\n )\n src_loader = data.DataLoader(\n src_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True, drop_last=True\n )\n tgt_dataset = CityscapesDataSetLMDB(\n args.data_dir_target, args.data_list_target,\n joint_transform=joint_transform,\n transform=tgt_input_transform, \n target_transform=target_transform,\n )\n tgt_loader = data.DataLoader(\n tgt_dataset, batch_size=args.batch_size, shuffle=True, \n num_workers=args.num_workers, pin_memory=True, drop_last=True\n )\n\n val_dataset = CityscapesDataSetLMDB(\n args.data_dir_val, args.data_list_val,\n transform=val_input_transform,\n target_transform=target_transform,\n )\n val_loader = data.DataLoader(\n val_dataset, batch_size=args.batch_size, shuffle=False,\n num_workers=args.num_workers, pin_memory=True, drop_last=False\n )\n\n style_trans = StyleTrans(args)\n style_trans.train(src_loader, tgt_loader, val_loader, writer)\n\n writer.close()\n\n\ndef get_options():\n parser = argparse.ArgumentParser()\n # train data params\n parser.add_argument(\"--batch_size\", type=int, default=1,\n 
help=\"Number of images sent to the network in one step.\")\n parser.add_argument(\"--num_workers\", type=int, default=1,\n help=\"number of workers for multithread dataloading.\")\n parser.add_argument(\"--input_size\", type=str,\n default='1024,512',\n help=\"Comma-separated string with height and width of source images.\")\n # train path params\n parser.add_argument(\"--data_dir\", type=str,\n default='/mnt/data-1/data/liangchen.song/seg/lmdb/gta5_trans_valid',\n help=\"Path to the directory containing the source dataset.\")\n parser.add_argument(\"--data_list\", type=str,\n default='/home/users/liangchen.song/data/seg/image_list/gta5_trans_valid.txt',\n help=\"Path to the file listing the images in the source dataset.\")\n parser.add_argument(\"--data_dir_target\", type=str, \n default='/mnt/data-1/data/liangchen.song/seg/lmdb/cityscapes_val',\n # default='/mnt/data-1/data/liangchen.song/seg/lmdb/cityscapes_train',\n help=\"Path to the directory containing the source dataset.\")\n parser.add_argument(\"--data_list_target\", type=str, \n default='/home/users/liangchen.song/data/seg/image_list/cityscapes_val.txt',\n help=\"Path to the file listing the images in the source dataset.\")\n parser.add_argument(\"--data_dir_val\", type=str,\n default='/mnt/data-1/data/liangchen.song/seg/lmdb/cityscapes_val',\n # default='/mnt/data-1/data/liangchen.song/seg/lmdb/cityscapes_train',\n help=\"Path to the directory containing the source dataset.\")\n parser.add_argument(\"--data_list_val\", type=str,\n default='/home/users/liangchen.song/data/seg/image_list/cityscapes_val.txt',\n help=\"Path to the file listing the images in the source dataset.\")\n parser.add_argument(\"--model_path_prefix\", type=str,\n default='/home/users/liangchen.song/data/models')\n parser.add_argument(\"--resume\", type=str,\n default='/home/users/liangchen.song/data/trained_models/')\n # loss params\n parser.add_argument(\"--lambda_values\", type=str, default='1,0,0,1e-2')\n # network params\n parser.add_argument(\"--seg_net\", type=str, default='fcn')\n parser.add_argument(\"--n_blocks\", type=int, default=9)\n parser.add_argument(\"--n_classes\", type=int, default=19)\n # optimize params\n parser.add_argument(\"--optimizer\", type=str, default='adam')\n parser.add_argument(\"--learning_rate\", type=float, default=1e-3)\n parser.add_argument(\"--D_learning_rate\", type=float, default=1e-3)\n parser.add_argument(\"--momentum\", type=float, default=0.9,\n help=\"Momentum component of the optimiser.\")\n parser.add_argument(\"--warm_up_epoch\", type=int, default=3)\n parser.add_argument(\"--num_epoch\", type=int, default=10)\n # log params\n parser.add_argument(\"--print_freq\", type=int, default=100)\n parser.add_argument(\"--show_img_freq\", type=int, default=100)\n parser.add_argument(\"--checkpoint_freq\", type=int, default=200)\n parser.add_argument(\"--save_path_prefix\", type=str, default='./data/out')\n parser.add_argument(\"--fcn_name\", type=str, default='fcn.pth')\n parser.add_argument(\"--tensorboard_log_dir\", type=str, default='./logs')\n parser.add_argument(\"-f\", type=str, default=None)\n\n args = parser.parse_args()\n\n return args\n\nif __name__ == '__main__':\n args = get_options()\n main(args)\n", "sub_path": "torch_submit/train_gen_seg.py", "file_name": "train_gen_seg.py", "file_ext": "py", "file_size_in_byte": 6630, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.use", "line_number": 20, "usage_type": "call"}, 
{"api_name": "tensorboardX.SummaryWriter", "line_number": 30, "usage_type": "call"}, {"api_name": "utils.joint_transforms.Compose", "line_number": 33, "usage_type": "call"}, {"api_name": "utils.joint_transforms", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.joint_transforms.FreeScale", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.joint_transforms", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.joint_transforms.RandomHorizontallyFlip", "line_number": 35, "usage_type": "call"}, {"api_name": "utils.joint_transforms", "line_number": 35, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 39, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 39, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 41, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 42, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 45, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 45, "usage_type": "name"}, {"api_name": "utils.transforms.FreeScale", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.transforms", "line_number": 46, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 47, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 47, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 48, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 48, "usage_type": "name"}, {"api_name": "utils.transforms.MaskToTensor", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.transforms", "line_number": 50, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToPILImage", "line_number": 51, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 51, "usage_type": "name"}, {"api_name": "dataset.gta5_dataset.GTA5DataSetLMDB", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 59, "usage_type": "name"}, {"api_name": "dataset.cityscapes_dataset.CityscapesDataSetLMDB", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 69, "usage_type": "name"}, {"api_name": "dataset.cityscapes_dataset.CityscapesDataSetLMDB", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 79, "usage_type": "name"}, {"api_name": "adv_model.StyleTrans", "line_number": 84, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "40651104", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import font_manager, 
rc\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\ndef What_team():\n a= input('Enter a baseball team>>>')\n if a in Xteam_names:\n for team1 in win_number:\n if a==team1:\n N=win_number[team1]\n else:\n pass\n for team2 in win_rate:\n if a == team2:\n R = win_rate[team2]\n else:\n pass\n for team3 in Ranking:\n if a == team3:\n L= Ranking[team3]\n else:\n pass\n return print('This year, rank {} team {} has a win rate of {} and {} wins.'.format(L, a, R, N))\n else:\n return print(\"That baseball team does not exist\")\n\n# Font settings\nrc('font', family='HCR Dotum')\n\n# Data collection\nhtml=urlopen('https://sports.news.naver.com/kbaseball/record/index.nhn?category=kbo&year=2021')\n\nsoup=BeautifulSoup(html, \"lxml\")\nkbo_table = soup.find_all('tbody', {\"id\" : \"regularTeamRecordList_table\"})\n\n\n# Start building the list\nkbo_table_tbody=kbo_table[0].find_all(\"tr\")\nteam_data=[]\n\nfor team in kbo_table_tbody:\n td=team.find_all('td')\n such_data=[]\n for content in td:\n a=content.get_text().strip()\n such_data.append(a)\n team_data.append(such_data)\n\n# Build the rankings\nranking=[]\n\nfor team in kbo_table_tbody:\n td=team.find_all('th')\n\n for content in td:\n a=content.get_text().strip()\n ranking.append(a)\n\n\n\n# Rebuild as dictionaries keyed by team name: win rate and win count\nwin_rate={}\nwin_number={}\n\n# Also keep each as a list, to make the graph easier to draw\nXteam_names=[]\nYwin_rate=[]\n\nfor team in team_data:\n team_name=team[0]\n Xteam_names.append(team_name)\n team_winRate=team[5]\n Ywin_rate.append(team_winRate)\n team_winNumber=team[2]\n win_rate[team_name]=team_winRate\n win_number[team_name]=team_winNumber\n\n# Match each team ranking with its team name\nRanking={}\nfor n in range(0,len(ranking)):\n Ranking[Xteam_names[n]]=ranking[n]\n\nWhat_team()\n\nx = np.arange(10)\n\nplt.bar(x, Ywin_rate)\nplt.xticks(x, Xteam_names)\nplt.title('2021 KBO Standings')\nplt.xlabel('Team')\nplt.ylabel('Win rate')\nplt.show()\n", "sub_path": "firstteam.py", "file_name": "firstteam.py", "file_ext": "py", "file_size_in_byte": 2288, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.rc", "line_number": 30, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 33, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "215157705", "text": "# coding: utf-8\n\n\"\"\"\n    LoRa App Server REST API\n\n    For more information about the usage of the LoRa App Server (REST) API, see [https://docs.loraserver.io/lora-app-server/api/](https://docs.loraserver.io/lora-app-server/api/). 
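Note on the firstteam.py entry above (now translated from Korean): What_team scans three dicts with for-loops to find one key, but Python dicts support that lookup directly. A small illustrative rewrite with toy data, not the script's actual code:

win_number = {"LG": "72", "KT": "76"}  # toy values in the scraped string format
win_rate = {"LG": "0.512", "KT": "0.563"}
ranking = {"LG": "3", "KT": "1"}

def describe(team):
    # Direct keyed lookups replace the "for t in dict: if t == team" scans.
    if team not in win_number:
        return "That baseball team does not exist"
    return "This year, rank {} team {} has a win rate of {} and {} wins.".format(
        ranking[team], team, win_rate[team], win_number[team])

print(describe("KT"))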
# noqa: E501\n\n OpenAPI spec version: 1.0.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom swagger_client.models.common_modulation import CommonModulation # noqa: F401,E501\nfrom swagger_client.models.gw_delay_timing_info import GwDelayTimingInfo # noqa: F401,E501\nfrom swagger_client.models.gw_downlink_timing import GwDownlinkTiming # noqa: F401,E501\nfrom swagger_client.models.gw_fsk_modulation_info import GwFSKModulationInfo # noqa: F401,E501\nfrom swagger_client.models.gw_gps_epoch_timing_info import GwGPSEpochTimingInfo # noqa: F401,E501\nfrom swagger_client.models.gw_immediately_timing_info import GwImmediatelyTimingInfo # noqa: F401,E501\nfrom swagger_client.models.gw_lo_ra_modulation_info import GwLoRaModulationInfo # noqa: F401,E501\n\n\nclass ApiDownlinkTXInfo(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'antenna': 'int',\n 'board': 'int',\n 'context': 'str',\n 'delay_timing_info': 'GwDelayTimingInfo',\n 'frequency': 'int',\n 'fsk_modulation_info': 'GwFSKModulationInfo',\n 'gateway_id': 'str',\n 'gps_epoch_timing_info': 'GwGPSEpochTimingInfo',\n 'immediately_timing_info': 'GwImmediatelyTimingInfo',\n 'lora_modulation_info': 'GwLoRaModulationInfo',\n 'modulation': 'CommonModulation',\n 'power': 'int',\n 'timing': 'GwDownlinkTiming'\n }\n\n attribute_map = {\n 'antenna': 'antenna',\n 'board': 'board',\n 'context': 'context',\n 'delay_timing_info': 'delayTimingInfo',\n 'frequency': 'frequency',\n 'fsk_modulation_info': 'fskModulationInfo',\n 'gateway_id': 'gatewayId',\n 'gps_epoch_timing_info': 'gpsEpochTimingInfo',\n 'immediately_timing_info': 'immediatelyTimingInfo',\n 'lora_modulation_info': 'loraModulationInfo',\n 'modulation': 'modulation',\n 'power': 'power',\n 'timing': 'timing'\n }\n\n def __init__(self, antenna=None, board=None, context=None, delay_timing_info=None, frequency=None, fsk_modulation_info=None, gateway_id=None, gps_epoch_timing_info=None, immediately_timing_info=None, lora_modulation_info=None, modulation=None, power=None, timing=None): # noqa: E501\n \"\"\"ApiDownlinkTXInfo - a model defined in Swagger\"\"\" # noqa: E501\n\n self._antenna = None\n self._board = None\n self._context = None\n self._delay_timing_info = None\n self._frequency = None\n self._fsk_modulation_info = None\n self._gateway_id = None\n self._gps_epoch_timing_info = None\n self._immediately_timing_info = None\n self._lora_modulation_info = None\n self._modulation = None\n self._power = None\n self._timing = None\n self.discriminator = None\n\n if antenna is not None:\n self.antenna = antenna\n if board is not None:\n self.board = board\n if context is not None:\n self.context = context\n if delay_timing_info is not None:\n self.delay_timing_info = delay_timing_info\n if frequency is not None:\n self.frequency = frequency\n if fsk_modulation_info is not None:\n self.fsk_modulation_info = fsk_modulation_info\n if gateway_id is not None:\n self.gateway_id = gateway_id\n if gps_epoch_timing_info is not None:\n self.gps_epoch_timing_info = gps_epoch_timing_info\n if immediately_timing_info is not None:\n self.immediately_timing_info = immediately_timing_info\n if 
lora_modulation_info is not None:\n self.lora_modulation_info = lora_modulation_info\n if modulation is not None:\n self.modulation = modulation\n if power is not None:\n self.power = power\n if timing is not None:\n self.timing = timing\n\n @property\n def antenna(self):\n \"\"\"Gets the antenna of this ApiDownlinkTXInfo. # noqa: E501\n\n The antenna identifier for emitting the frame. # noqa: E501\n\n :return: The antenna of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: int\n \"\"\"\n return self._antenna\n\n @antenna.setter\n def antenna(self, antenna):\n \"\"\"Sets the antenna of this ApiDownlinkTXInfo.\n\n The antenna identifier for emitting the frame. # noqa: E501\n\n :param antenna: The antenna of this ApiDownlinkTXInfo. # noqa: E501\n :type: int\n \"\"\"\n\n self._antenna = antenna\n\n @property\n def board(self):\n \"\"\"Gets the board of this ApiDownlinkTXInfo. # noqa: E501\n\n The board identifier for emitting the frame. # noqa: E501\n\n :return: The board of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: int\n \"\"\"\n return self._board\n\n @board.setter\n def board(self, board):\n \"\"\"Sets the board of this ApiDownlinkTXInfo.\n\n The board identifier for emitting the frame. # noqa: E501\n\n :param board: The board of this ApiDownlinkTXInfo. # noqa: E501\n :type: int\n \"\"\"\n\n self._board = board\n\n @property\n def context(self):\n \"\"\"Gets the context of this ApiDownlinkTXInfo. # noqa: E501\n\n Gateway specific context. In case of a Class-A downlink, this contains a copy of the uplink context. # noqa: E501\n\n :return: The context of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._context\n\n @context.setter\n def context(self, context):\n \"\"\"Sets the context of this ApiDownlinkTXInfo.\n\n Gateway specific context. In case of a Class-A downlink, this contains a copy of the uplink context. # noqa: E501\n\n :param context: The context of this ApiDownlinkTXInfo. # noqa: E501\n :type: str\n \"\"\"\n if context is not None and not re.search(r'^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$', context): # noqa: E501\n raise ValueError(r\"Invalid value for `context`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$/`\") # noqa: E501\n\n self._context = context\n\n @property\n def delay_timing_info(self):\n \"\"\"Gets the delay_timing_info of this ApiDownlinkTXInfo. # noqa: E501\n\n Context based delay timing information. # noqa: E501\n\n :return: The delay_timing_info of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: GwDelayTimingInfo\n \"\"\"\n return self._delay_timing_info\n\n @delay_timing_info.setter\n def delay_timing_info(self, delay_timing_info):\n \"\"\"Sets the delay_timing_info of this ApiDownlinkTXInfo.\n\n Context based delay timing information. # noqa: E501\n\n :param delay_timing_info: The delay_timing_info of this ApiDownlinkTXInfo. # noqa: E501\n :type: GwDelayTimingInfo\n \"\"\"\n\n self._delay_timing_info = delay_timing_info\n\n @property\n def frequency(self):\n \"\"\"Gets the frequency of this ApiDownlinkTXInfo. # noqa: E501\n\n TX frequency (in Hz). # noqa: E501\n\n :return: The frequency of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: int\n \"\"\"\n return self._frequency\n\n @frequency.setter\n def frequency(self, frequency):\n \"\"\"Sets the frequency of this ApiDownlinkTXInfo.\n\n TX frequency (in Hz). # noqa: E501\n\n :param frequency: The frequency of this ApiDownlinkTXInfo. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._frequency = frequency\n\n @property\n def fsk_modulation_info(self):\n \"\"\"Gets the fsk_modulation_info of this ApiDownlinkTXInfo. # noqa: E501\n\n FSK modulation information. # noqa: E501\n\n :return: The fsk_modulation_info of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: GwFSKModulationInfo\n \"\"\"\n return self._fsk_modulation_info\n\n @fsk_modulation_info.setter\n def fsk_modulation_info(self, fsk_modulation_info):\n \"\"\"Sets the fsk_modulation_info of this ApiDownlinkTXInfo.\n\n FSK modulation information. # noqa: E501\n\n :param fsk_modulation_info: The fsk_modulation_info of this ApiDownlinkTXInfo. # noqa: E501\n :type: GwFSKModulationInfo\n \"\"\"\n\n self._fsk_modulation_info = fsk_modulation_info\n\n @property\n def gateway_id(self):\n \"\"\"Gets the gateway_id of this ApiDownlinkTXInfo. # noqa: E501\n\n Gateway ID. # noqa: E501\n\n :return: The gateway_id of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._gateway_id\n\n @gateway_id.setter\n def gateway_id(self, gateway_id):\n \"\"\"Sets the gateway_id of this ApiDownlinkTXInfo.\n\n Gateway ID. # noqa: E501\n\n :param gateway_id: The gateway_id of this ApiDownlinkTXInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._gateway_id = gateway_id\n\n @property\n def gps_epoch_timing_info(self):\n \"\"\"Gets the gps_epoch_timing_info of this ApiDownlinkTXInfo. # noqa: E501\n\n GPS Epoch timing information. # noqa: E501\n\n :return: The gps_epoch_timing_info of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: GwGPSEpochTimingInfo\n \"\"\"\n return self._gps_epoch_timing_info\n\n @gps_epoch_timing_info.setter\n def gps_epoch_timing_info(self, gps_epoch_timing_info):\n \"\"\"Sets the gps_epoch_timing_info of this ApiDownlinkTXInfo.\n\n GPS Epoch timing information. # noqa: E501\n\n :param gps_epoch_timing_info: The gps_epoch_timing_info of this ApiDownlinkTXInfo. # noqa: E501\n :type: GwGPSEpochTimingInfo\n \"\"\"\n\n self._gps_epoch_timing_info = gps_epoch_timing_info\n\n @property\n def immediately_timing_info(self):\n \"\"\"Gets the immediately_timing_info of this ApiDownlinkTXInfo. # noqa: E501\n\n Immediately timing information. # noqa: E501\n\n :return: The immediately_timing_info of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: GwImmediatelyTimingInfo\n \"\"\"\n return self._immediately_timing_info\n\n @immediately_timing_info.setter\n def immediately_timing_info(self, immediately_timing_info):\n \"\"\"Sets the immediately_timing_info of this ApiDownlinkTXInfo.\n\n Immediately timing information. # noqa: E501\n\n :param immediately_timing_info: The immediately_timing_info of this ApiDownlinkTXInfo. # noqa: E501\n :type: GwImmediatelyTimingInfo\n \"\"\"\n\n self._immediately_timing_info = immediately_timing_info\n\n @property\n def lora_modulation_info(self):\n \"\"\"Gets the lora_modulation_info of this ApiDownlinkTXInfo. # noqa: E501\n\n LoRa modulation information. # noqa: E501\n\n :return: The lora_modulation_info of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: GwLoRaModulationInfo\n \"\"\"\n return self._lora_modulation_info\n\n @lora_modulation_info.setter\n def lora_modulation_info(self, lora_modulation_info):\n \"\"\"Sets the lora_modulation_info of this ApiDownlinkTXInfo.\n\n LoRa modulation information. # noqa: E501\n\n :param lora_modulation_info: The lora_modulation_info of this ApiDownlinkTXInfo. 
# noqa: E501\n :type: GwLoRaModulationInfo\n \"\"\"\n\n self._lora_modulation_info = lora_modulation_info\n\n @property\n def modulation(self):\n \"\"\"Gets the modulation of this ApiDownlinkTXInfo. # noqa: E501\n\n Modulation. # noqa: E501\n\n :return: The modulation of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: CommonModulation\n \"\"\"\n return self._modulation\n\n @modulation.setter\n def modulation(self, modulation):\n \"\"\"Sets the modulation of this ApiDownlinkTXInfo.\n\n Modulation. # noqa: E501\n\n :param modulation: The modulation of this ApiDownlinkTXInfo. # noqa: E501\n :type: CommonModulation\n \"\"\"\n\n self._modulation = modulation\n\n @property\n def power(self):\n \"\"\"Gets the power of this ApiDownlinkTXInfo. # noqa: E501\n\n TX power (in dBm). # noqa: E501\n\n :return: The power of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: int\n \"\"\"\n return self._power\n\n @power.setter\n def power(self, power):\n \"\"\"Sets the power of this ApiDownlinkTXInfo.\n\n TX power (in dBm). # noqa: E501\n\n :param power: The power of this ApiDownlinkTXInfo. # noqa: E501\n :type: int\n \"\"\"\n\n self._power = power\n\n @property\n def timing(self):\n \"\"\"Gets the timing of this ApiDownlinkTXInfo. # noqa: E501\n\n Timing defines the downlink timing to use. # noqa: E501\n\n :return: The timing of this ApiDownlinkTXInfo. # noqa: E501\n :rtype: GwDownlinkTiming\n \"\"\"\n return self._timing\n\n @timing.setter\n def timing(self, timing):\n \"\"\"Sets the timing of this ApiDownlinkTXInfo.\n\n Timing defines the downlink timing to use. # noqa: E501\n\n :param timing: The timing of this ApiDownlinkTXInfo. # noqa: E501\n :type: GwDownlinkTiming\n \"\"\"\n\n self._timing = timing\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ApiDownlinkTXInfo, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ApiDownlinkTXInfo):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "sub_path": "swagger_client/models/api_downlink_tx_info.py", "file_name": "api_downlink_tx_info.py", "file_ext": "py", "file_size_in_byte": 15262, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "re.search", "line_number": 184, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 423, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 448, "usage_type": "call"}]} +{"seq_id": "159766766", "text": "import json\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n#From a list of JSON data to a JSON Array\nwith 
open(\"test_large_collection.json\") as to_format:\n json_to_format = to_format.read()\n print(\"JSON read\")\n json_to_format = json_to_format.replace('\\n','\\n,');\n print(\"JSON replaced characters\")\n json_formatted = \"[\"+json_to_format+\"]\"\n print(\"JSON formatted\")\n #print(json_formatted)\n\n#to_format.close();\n\n#read the json format\njson_data = json.loads(json_formatted)\n\n#cleaning data\nfor i1, rsvp1 in enumerate(json_data):\n #print(repr(i1))\n for i2, rsvp2 in enumerate(json_data):\n if rsvp1[\"rsvp_id\"]==rsvp2[\"rsvp_id\"] and i1!=i2:\n if rsvp1[\"mtime\"]>=rsvp2[\"mtime\"]:\n del json_data[i2]\n else:\n del json_data[i1]\n\n#print(json_data)\n\n#Saves sanitized json\nwith open('sanitized_json.json', 'w') as outfile:\n json.dump(json_data, outfile)\n\n", "sub_path": "Python/read_and_sanitize_json.py", "file_name": "read_and_sanitize_json.py", "file_ext": "py", "file_size_in_byte": 938, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "370960395", "text": "import json\nimport redis\nimport threading\nimport logging\nfrom psycopg2.pool import ThreadedConnectionPool\nfrom core.events.oktell import OktellOrderAccepted\nfrom core.coreapi import TMAPI\nfrom .kts.channels import Channel, Channels\nfrom .config import REDIS, CHANNELS, DSN, TME_DB\nfrom orders.database import AsteriskSounds\n\n\nLOGGER = logging.getLogger()\n\n\ndef enrich_data(data):\n with TMAPI.PG_POOL.getconn() as pgcon:\n with pgcon.cursor() as c:\n c.execute((\n 'select o.driver_timecount, '\n 'o.discountedsumm, o.clientid as client_id, '\n 'o.driverid, o.cashless, o.state, o.phone, '\n 'cr.gosnumber, cr.color as car_color, '\n 'cr.mark as car_mark, coalesce(cr.model, \\'\\') as car_model, '\n 'cr.id as car_id, o.source_time '\n 'from orders o '\n 'join crews cw on (cw.id=o.crewid) '\n 'join cars cr on (cr.id=cw.carid) '\n 'where o.id=%(order_id)s;'),\n {'order_id': data['order_id']})\n r = c.fetchone()\n if r:\n order_data = {cn.name :r[ix] for ix, cn in enumerate(c.description)}\n data.update(**order_data)\n return data\n\ndef get_distributor(phone, db):\n LOGGER.debug(phone)\n\n phone = phone[-10:]\n SELECT = 'select sc.id, d.address, c.port as channel, count(sms.*) as sim_count, dst.name, dst_sms.sms '\\\n 'from routes_mask roma '\\\n 'join regions reg on (reg.id=roma.region_id) '\\\n 'join operators op on (op.id=roma.operator_id) '\\\n 'join distributors dst on (dst.id=op.distributor_id or dst.id=0) '\\\n 'left join distributors dst_sms on (dst_sms.id=op.distributor_id) '\\\n 'join sim_cards sc on (sc.distributor_id=dst.id) '\\\n 'join channels c on (c.id=sc.channel_id) '\\\n 'join devices d on (d.id=c.device_id) '\\\n 'left join sms on (sms.sim_id=sc.id and sms.date_time > %s and sms.direction=1) '\\\n 'where (roma.aaa=%s and %s between roma.range_a and roma.range_b) and '\\\n 'sc.direction=2 and sc.is_active and (reg.is_home or dst.all_home) and '\\\n '(dst_sms.sms or (dst_sms.id = dst.id)) '\\\n 'group by sc.id, d.address, c.port, dst.id, dst_sms.sms '\\\n 'order by dst.id desc, sim_count '\\\n 'limit 1;'\n\n date_time = datetime.datetime.now()\n date_time = datetime.datetime(\n date_time.year, date_time.month, date_time.day, 0, 0, 0)\n ARGS = (date_time, phone[:3], int(phone[3:]), )\n # logger.debug('get_distributor: %s %s' % (SELECT, ARGS))\n c = db.cursor()\n c.execute(SELECT, 
ARGS)\n try:\n sim_id, address, channel, _, distributor, _ = c.fetchone()\n channel %= 5060\n except Exception as e:\n LOGGER.warning('get_distributor exception 1: %s %s' % (e, phone))\n sim_id, address, channel, _, distributor, _ = None, '', 0, 0, None, False\n c.close()\n LOGGER.debug('%s %s %s %s %s' %\n (phone, distributor, address, channel, sim_id))\n # db.close()\n return sim_id, address, channel, distributor\n\n\ndef register_channels(pg_pool):\n SELECT = (\n 'select distinct d.address, c.port%5060 as port, dest.name '\n 'from devices d '\n 'join channels c on (c.device_id=d.id) '\n 'join sim_cards sc on (sc.id=c.sim_id) '\n 'join distributors dest on (dest.id=sc.distributor_id) '\n 'where d.name like \\'КТС%\\' and c.is_active and sc.direction=2 '\n 'order by d.address, port'\n )\n channels = Channels(pg_pool, LOGGER)\n with pg_pool.getconn() as pgconn:\n with pgconn.cursor() as c:\n c.execute(SELECT)\n\n [channels.register(Channel(address=ch[0], channel=ch[1]),\n distributor=ch[-1]) for ch in c.fetchall()]\n return channels\n\n\ndef main():\n TMAPI.LOGGER = LOGGER\n TMAPI.ASTERISK_SOUNDS = AsteriskSounds()\n pg_pool = ThreadedConnectionPool(*DSN)\n TMAPI.PG_POOL = ThreadedConnectionPool(*TME_DB)\n channels = register_channels(pg_pool)\n redpool = redis.ConnectionPool(host=REDIS)\n r = redis.Redis(connection_pool=redpool)\n rs = r.pubsub()\n rs.subscribe(CHANNELS)\n\n for event in rs.listen():\n LOGGER.debug('Received %s', event)\n if event['type'] == 'message':\n LOGGER.debug('Got message: %s', event)\n channel = event['channel'].decode().split(':')[1]\n data = json.loads(event['data'])\n data = enrich_data(data)\n phones = data['phones'][0]\n data = TMAPI.create_message(channel, data)\n\n _channel = threading.Thread(\n target=channels.send_sms, args=(data['sms'], phones))\n _channel.start()\n LOGGER.debug('%s: %s', _channel, event)\n\n\nif __name__ == '__main__':\n try:\n m = threading.Thread(target=main)\n m.start()\n m.join()\n except KeyboardInterrupt as e:\n exit(0)\n", "sub_path": "sms/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5043, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "core.coreapi.TMAPI.PG_POOL.getconn", "line_number": 17, "usage_type": "call"}, {"api_name": "core.coreapi.TMAPI.PG_POOL", "line_number": 17, "usage_type": "attribute"}, {"api_name": "core.coreapi.TMAPI", "line_number": 17, "usage_type": "name"}, {"api_name": "kts.channels.Channels", "line_number": 88, "usage_type": "call"}, {"api_name": "kts.channels.Channel", "line_number": 93, "usage_type": "call"}, {"api_name": "core.coreapi.TMAPI.LOGGER", "line_number": 99, "usage_type": "attribute"}, {"api_name": "core.coreapi.TMAPI", "line_number": 99, "usage_type": "name"}, {"api_name": "core.coreapi.TMAPI.ASTERISK_SOUNDS", "line_number": 100, "usage_type": "attribute"}, {"api_name": "core.coreapi.TMAPI", "line_number": 100, "usage_type": "name"}, {"api_name": "orders.database.AsteriskSounds", "line_number": 100, "usage_type": "call"}, {"api_name": "psycopg2.pool.ThreadedConnectionPool", "line_number": 101, "usage_type": "call"}, {"api_name": "config.DSN", "line_number": 101, "usage_type": "name"}, {"api_name": "core.coreapi.TMAPI.PG_POOL", "line_number": 102, "usage_type": "attribute"}, {"api_name": "core.coreapi.TMAPI", "line_number": 102, "usage_type": "name"}, {"api_name": "psycopg2.pool.ThreadedConnectionPool", 
"line_number": 102, "usage_type": "call"}, {"api_name": "config.TME_DB", "line_number": 102, "usage_type": "name"}, {"api_name": "redis.ConnectionPool", "line_number": 104, "usage_type": "call"}, {"api_name": "config.REDIS", "line_number": 104, "usage_type": "name"}, {"api_name": "redis.Redis", "line_number": 105, "usage_type": "call"}, {"api_name": "config.CHANNELS", "line_number": 107, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 114, "usage_type": "call"}, {"api_name": "core.coreapi.TMAPI.create_message", "line_number": 117, "usage_type": "call"}, {"api_name": "core.coreapi.TMAPI", "line_number": 117, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 119, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "420661277", "text": "# Google Spreadsheet Libraries\r\nimport gspread\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\n\r\n# GUI Libraries\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\n\r\n# Regular Expressions Libary\r\nimport re\r\n\r\n# File Flush\r\nimport os \r\n\r\n# My Libararies\r\n# import tkinterFunctions as myTk\r\n# import gspreadFunctions as myGSpread\r\n\r\n# Global Variables\r\nCELL_COL = \"B\"\r\nCELL_CONTENT = \"1\"\r\nGOOGLE_SHEETS_FILENAME = \"Google Sheets Parser Test\"\r\n\r\n# use creds to create a client to interact with the Google Drive API\r\nscope = ['https://spreadsheets.google.com/feeds',\r\n 'https://www.googleapis.com/auth/drive']\r\ncreds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)\r\nclient = gspread.authorize(creds)\r\n\r\n# Find a workbook by name and open the first sheet\r\n# Make sure you use the right name here.\r\nsheet = client.open(GOOGLE_SHEETS_FILENAME).sheet1\r\n\r\n# Initialize Tkinter GUI Window\r\nroot = Tk()\r\nroot.title(\"UCI RCC Google Sheets Parser\")\r\n\r\n# Clear/erase input.txt File\r\nfp = open(\"input.txt\", \"w+\")\r\nfp.seek(0)\r\nfp.truncate()\r\nfp.close()\r\n\r\n# Tkinter Command Functions\r\ndef retrieve_spreadsheet_name(self, *args):\r\n global GOOGLE_SHEETS_FILENAME\r\n GOOGLE_SHEETS_FILENAME = spreadsheet_name.get()\r\n\r\n# New implementation: Inputting Letters\r\ndef retrieve_cell_col(self, *args):\r\n global CELL_COL\r\n temp = cell_col.get()\r\n temp = temp.upper()\r\n result = 0\r\n\r\n for i, T, in enumerate(temp[::-1]):\r\n letterNumber = ord(T) - ord(\"A\") + 1\r\n result += letterNumber * (26 ** i)\r\n CELL_COL = result\r\n\r\ndef retrieve_cell_content(self, *args):\r\n global CELL_CONTENT\r\n CELL_CONTENT = cell_content.get()\r\n\r\n# onButton \r\ndef retrieveInputForTextFile():\r\n inputValue = inputFileTextBox.get(\"1.0\", \"end-1c\")\r\n # Open file i/o\r\n try:\r\n fp = open(\"input.txt\", \"w\")\r\n except:\r\n # print(\"File to open does not exist!\")\r\n progress_message.set(\"Failed to open input.txt, file does not exist!\")\r\n fp.write(inputValue) # Write to input.txt file\r\n # fp.write(\"\\n\")\r\n\r\n # Commands for real-time updating input.txt file\r\n fp.flush()\r\n os.fsync(fp.fileno())\r\n\r\n # print(inputValue) # Debugging/Printing to Console\r\n inputFileTextBox.delete(1.0, END) # Clear's TextBox Widget on Success\r\n fp.close() # Close the file i/o\r\n\r\n# Gspread Functions\r\ndef makeChangesToSpreadsheet():\r\n # Edge cases\r\n\r\n # Should not overwrite member names\r\n if CELL_COL == 1:\r\n progress_message.set(\"Invalid option, cannot update cells that contain member names.\")\r\n return\r\n\r\n # Set Google 
Sheets\r\n    try:\r\n        sheet = client.open(GOOGLE_SHEETS_FILENAME).sheet1\r\n    except:\r\n        progress_message.set(\"Invalid Google Sheets Filename.\")\r\n        return\r\n\r\n\r\n    # Attempt to open input.txt File\r\n    try:\r\n        fpr = open(\"input.txt\", \"r\")\r\n    except:\r\n        print(\"File to open does not exist!\")\r\n        return  # no input file, nothing to parse\r\n    # Attempt to open error.txt File\r\n    try:\r\n        fpErr = open(\"error.txt\", \"w\")\r\n    except:\r\n        print(\"Failed to open error.txt\")\r\n        return  # errors could not be logged, so stop here\r\n\r\n    name = fpr.readline().strip()\r\n\r\n    # Variables to keep track of errors and progress\r\n    entries_changed = 0\r\n    errors = 0\r\n\r\n    while name:\r\n        try:\r\n            # escape the name so regex metacharacters in it cannot break the search\r\n            nameRegex = re.compile(re.escape(name), re.IGNORECASE)\r\n            cell = sheet.find(nameRegex)\r\n            # print(name)\r\n\r\n            # print(\"Found something at R%s C%s\" % (cell.row, cell.col))\r\n            # print(\"Updating values B%s\\n\" % (cell.row))\r\n            # ex.) B1 -> (Column + Row)\r\n\r\n            \r\n            sheet.update_cell(cell.row, CELL_COL, CELL_CONTENT) # Update the value of the current cell\r\n\r\n            entries_changed += 1\r\n\r\n            name = fpr.readline().strip() # Continue iterating through file\r\n        except:\r\n            # print(\"FAILED TO COMPUTE FOR: %s\\n\" % (name)) # Error Message for feedback\r\n            fpErr.write(\"FAILED TO COMPUTE FOR: %s\\n\" % (name)) # Error Message for feedback to error.txt\r\n            errors += 1\r\n            name = fpr.readline().strip() # Continue iterating through file\r\n            continue # Skip back to beginning of loop\r\n\r\n    # Updating entries_changed label \r\n    # print(entries_changed)\r\n    if (entries_changed == 1):\r\n        entries_changed_counter.set(\"%s cell has been updated.\" % (entries_changed))\r\n    else:\r\n        entries_changed_counter.set(\"%s cells have been updated.\" % (entries_changed))\r\n    \r\n    # Updating errors_occurred label\r\n    if (errors == 1):\r\n        errors_occured_counter.set(\"%s error has occurred.\" % (errors))\r\n    else:\r\n        errors_occured_counter.set(\"%s errors have occurred.\" % (errors))\r\n\r\n    # Updating progress_message label\r\n    progress_message.set(\"Success!\")\r\n\r\n    # Close file i/o\r\n    fpr.close()\r\n    fpErr.close()\r\n\r\n    # Print success message\r\n    # print(\"input.txt file parsing completed\")\r\n\r\n\r\n# GOOGLE_SHEETS_FILENAME\r\nttk.Label(root, text=\"What is the spreadsheet name?:\").pack()\r\nspreadsheet_name = StringVar()\r\nspreadsheet_name.trace_add(\"write\", retrieve_spreadsheet_name)\r\nspreadsheet_name_entry = Entry(root, width = 50, textvariable = spreadsheet_name)\r\nspreadsheet_name_entry.pack()\r\n\r\n# CELL_COL\r\nttk.Label(root, text=\"Which column are we modifying? 
(Enter a letter):\").pack()\r\ncell_col = StringVar()\r\ncell_col.trace_add(\"write\", retrieve_cell_col)\r\ncell_col_entry = Entry(root, width = 8, textvariable = cell_col)\r\ncell_col_entry.pack()\r\n\r\n# CELL_CONTENT\r\nttk.Label(root, text=\"How many points do you want to input:\").pack()\r\ncell_content = StringVar()\r\ncell_content.trace_add(\"write\", retrieve_cell_content)\r\ncell_content_entry = Entry(root, width = 8, textvariable = cell_content)\r\ncell_content_entry.pack()\r\n\r\n# FULL NAMES TO ADD TO INPUT FILE\r\nttk.Label(root, text=\"Please input names of members who attended this meeting:\").pack()\r\ninputFileTextBox = Text(root, height=30, width=20)\r\ninputFileTextBox.pack()\r\nsubmitButton = ttk.Button(root, text='Submit Names', command= retrieveInputForTextFile)\r\nsubmitButton.pack()\r\n\r\n##startframe = Frame(root)\r\n#canvas = Canvas(root, width = 1000, height = 1000)\r\n#canvas.pack()\r\n#img = PhotoImage(master = canvas, file=\"rcc_logo.png\", width = 1000, height = 1000)\r\n#canvas.create_image(20,20, anchor = NW, image = img)\r\n#PhotoImage(master = canvas, width = 20, height = 20)\r\n\r\n\r\n# entries_changed message/label\r\nentries_changed_counter = StringVar()\r\nentries_changed_counter.set(\"0 cells have been updated so far.\")\r\nentries_changed_label = Label(root, textvariable=entries_changed_counter)\r\nentries_changed_label.pack()\r\n\r\n# errors_occured message/label\r\nerrors_occured_counter = StringVar()\r\nerrors_occured_counter.set(\"0 errors have occured so far.\") \r\nerrors_occured_label = Label(root, textvariable=errors_occured_counter)\r\nerrors_occured_label.pack()\r\n\r\n# progress message\r\nprogress_message = StringVar()\r\nprogress_message.set(\"Thanks for using my application! -Dennis\") \r\nprogress_message_label = Label(root, textvariable=progress_message)\r\nprogress_message_label.pack()\r\n\r\n# SUBMIT CHANGES TO GSPREADSHEET\r\nfinalSubmissionButton = ttk.Button(root, text='Submit Changes!', command = makeChangesToSpreadsheet)\r\nfinalSubmissionButton.pack()\r\n\r\nroot.mainloop()\r\n\r\ntry:\r\n root.destroy()\r\nexcept:\r\n pass\r\n\r\n# print(\"Dennis' Google Sheets Parser has finished running successfully.\")", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7351, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name", "line_number": 27, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials", "line_number": 27, "usage_type": "name"}, {"api_name": "gspread.authorize", "line_number": 28, "usage_type": "call"}, {"api_name": "os.fsync", "line_number": 79, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 121, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tkinter.ttk.Label", "line_number": 167, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 167, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 174, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 174, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 181, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 181, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 188, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 188, "usage_type": "name"}, 
{"api_name": "tkinter.ttk.Button", "line_number": 191, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 191, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 221, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 221, "usage_type": "name"}]} +{"seq_id": "416806238", "text": "import torch\nfrom torch import nn, optim\nfrom torchvision import transforms, datasets, models\nfrom collections import OrderedDict\nfrom PIL import Image\nimport numpy as np\n\ndef train(data_dir, save_dir, arch, learning_rate, hidden_units, epochs, gpu):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n # Load a pre-trained network\n model = eval(\"models.{}(pretrained=True)\".format(arch))\n \n classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(25088, hidden_units)),\n ('relu1', nn.ReLU()),\n ('dropout1', nn.Dropout(p=0.5)),\n ('fc2', nn.Linear(hidden_units, 102)),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n\n for params in model.parameters():\n params.requires_grad = False\n\n model.classifier = classifier\n if gpu:\n model.cuda()\n else:\n model.cpu()\n \n criterion = nn.NLLLoss()\n optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)\n \n # TODO: Define your transforms for the training, validation, and testing sets\n default_transforms = [transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]\n data_transforms = {\n 'training': transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ]),\n 'validation': transforms.Compose(default_transforms),\n 'testing': transforms.Compose(default_transforms)\n }\n\n # TODO: Load the datasets with ImageFolder\n image_datasets = {\n 'training': datasets.ImageFolder(train_dir, transform=data_transforms['training']),\n 'validation': datasets.ImageFolder(valid_dir, transform=data_transforms['validation']),\n 'testing': datasets.ImageFolder(test_dir, transform=data_transforms['testing'])\n }\n\n # TODO: Using the image datasets and the trainforms, define the dataloaders\n dataloaders = {\n 'training': torch.utils.data.DataLoader(image_datasets['training'], batch_size=64, shuffle=True),\n 'validation': torch.utils.data.DataLoader(image_datasets['validation'], batch_size=64, shuffle=True),\n 'testing': torch.utils.data.DataLoader(image_datasets['testing'], batch_size=64, shuffle=False)\n }\n\n model.train()\n print_every = 40\n steps = 0\n\n for e in range(epochs):\n model.train()\n running_loss = 0\n accuracy_train = 0\n\n for inputs, labels in dataloaders['training']:\n steps += 1\n if gpu:\n inputs, labels = inputs.cuda(), labels.cuda()\n optimizer.zero_grad()\n\n output = model.forward(inputs)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n # Track the loss and accuracy on the validation set to determine the best hyperparameters\n if steps % print_every == 0:\n model.eval()\n valid_accuracy = 0\n valid_running_loss = 0\n\n for valid_inputs, valid_labels in dataloaders['validation']:\n if gpu:\n valid_inputs, valid_labels = valid_inputs.cuda(), valid_labels.cuda()\n valid_output = model.forward(valid_inputs)\n valid_loss = criterion(valid_output, valid_labels)\n valid_running_loss += valid_loss.item()\n\n ps = 
torch.exp(valid_output)\n                    valid_equality = (valid_labels.data == ps.max(dim=1)[1])\n                    valid_accuracy += valid_equality.type_as(torch.FloatTensor()).mean()\n\n                print(\"Epoch: {}\".format(e),\n                      \"Training Loss: {:.3f} \".format(running_loss/print_every),\n                      \"Validation Loss: {:.3f} \".format(valid_running_loss / len(dataloaders['validation'])),\n                      \"Validation Accuracy: {:.3f}\".format(valid_accuracy / len(dataloaders['validation'])))\n                model.train()\n    \n    model.class_to_idx = image_datasets['training'].class_to_idx\n    if gpu:\n        model.cuda()\n    else:\n        model.cpu()\n    torch.save({'arch': arch,\n                'epochs_number': epochs,\n                'hidden_units': hidden_units,\n                'optimizer_state': optimizer.state_dict(),  # save the state dict itself, not the bound method\n                'state_dict': model.state_dict(),\n                'class_to_idx': model.class_to_idx},\n               save_dir + '/checkpoint.pth')\n", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 5073, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "torch.nn.Sequential", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 34, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 37, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 39, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 39, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 40, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 40, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 42, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomRotation", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 44, "usage_type": "name"}, 
{"api_name": "torchvision.transforms.ToTensor", "line_number": 45, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 45, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 46, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 46, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 48, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 48, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 49, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 54, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 54, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 55, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 55, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 56, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 63, "usage_type": "attribute"}, {"api_name": "torch.exp", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "547320975", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 6 16:51:35 2019\n\n@author: gianni\n\"\"\"\n\nfrom glob import glob\nfrom time import sleep\nfrom baselines.bench import load_results\nfrom matplotlib import pylab as plt\nimport numpy as np\nimport argparse\nimport os\nimport pandas as pd\n\n\ndef get_args():\n\n parser = argparse.ArgumentParser(description='RL')\n parser.add_argument(\n '--log-dir',default=None, help='dir save models and statistics')\n args = parser.parse_args()\n args.log_dir = os.path.expanduser(args.log_dir)\n return args\n\n\nargs = get_args()\nmy_dir = args.log_dir\nexperiments_path = glob(my_dir+'/*/')\nblack_list = [\n #\"exp_baseline\",\n \"exp_baseline_rnn\",\n \"exp_attention_space_mid\",\n \"exp_attention_space_fin\",\n \"exp_attention_time\",\n #\"exp_baseline_100\",\n #\"exp_attention_time_100\",\n]\n\nexperiment_names = [path.split('/')[-2] for path in experiments_path if\n path.split('/')[-2] not in black_list]\n\n#experiment_names = sorted(experiment_names, key=int) \n\n\n\n#experiment_names = [\"0.3\",\"0.1\", \"0.05\",\"0\"]\n\ndf = pd.DataFrame()\n\n\nfig = plt.figure(figsize=(15, 9))\n\nfor num, experiment in enumerate(experiments_path):\n df = pd.DataFrame()\n #if experiment.split(\"/\")[-2] not in black_list:\n exps = glob(experiment+'/*/') \n #exps = glob(experiment)\n print(exps)\n\n for _, name in enumerate(exps):\n df_ = load_results(name) \n df = df.append(df_)\n\n\n df['f']= df['l'].cumsum()/1000000\n df['perf']= df['ereward']/(df['max_reward'])\n df['perf'].where(df['perf']>0,0,inplace=True)\n df['goal'] = df['perf']>0.9 #guess 
a threshold\n\n    roll = 500\n    total_time = df['t'].iloc[-1]\n    total_steps = df['l'].sum()\n    total_episodes = df['r'].size\n    experiment_names[num] += \" ({:.1f} h, FPS {:.1f})\".format(total_time / 3600, total_steps/total_time)\n\n    \"\"\" ax = plt.subplot(1, 2, 1)\n    df[['f','r']].rolling(roll).mean().iloc[0:-1:40].plot('f','r', ax=ax,legend=False)\n    ax.set_xlabel('N. steps (M)')\n    ax.set_ylabel('Reward')\n    ax.grid(True)\n    plt.legend(experiment_names, loc='best') \"\"\"\n\n    \"\"\" ax = plt.subplot(1, 1, 1)\n    df[['f','perf']].rolling(roll).mean().iloc[0:-1:40].plot('f','perf', ax=ax,legend=False)\n    ax.set_xlabel('N. steps (M)')\n    ax.set_ylabel('Performance')\n    ax.grid(True)\n    plt.legend(experiment_names, loc='best') \"\"\"\n\n    ax = plt.subplot(1, 1, 1)\n    df[['f','reward_woD']].rolling(roll).mean().iloc[0:-1:40].plot('f','reward_woD', ax=ax,legend=False)\n    ax.set_xlabel('N. steps (M)')\n    ax.set_ylabel('Reward without Demonstrations')\n    ax.grid(True)\n\n    \"\"\" ax = plt.subplot(2, 2, 3)\n    df[['f','goal']].rolling(roll).mean().iloc[0:-1:40].plot('f','goal', ax=ax,legend=False)\n    ax.set_xlabel('N. steps (M)')\n    ax.set_ylabel('Estimated evalai score')\n    ax.grid(True)\n    plt.legend(experiment_names, loc='best')\n\n    ax = plt.subplot(2, 2, 4)\n    df[['l']].rolling(roll).mean().iloc[0:-1:40].plot(y='l', ax=ax,legend=False)\n    ax.set_xlabel('N. episodes')\n    ax.set_ylabel('Episode length')\n    ax.grid(True) \"\"\"\n\n    plt.legend(experiment_names, loc='best')\n    fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.1, hspace=0.2)\n\n\n# fig.tight_layout()\nax.get_figure().savefig(my_dir+'/performance.jpg')\nplt.clf()\nquit()", "sub_path": "main/monitor_exp3.py", "file_name": "monitor_exp3.py", "file_ext": "py", "file_size_in_byte": 3327, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pylab.figure", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 54, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 57, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 59, "usage_type": "call"}, {"api_name": "baselines.bench.load_results", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pylab.subplot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pylab.legend", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pylab.clf", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 118, "usage_type": "name"}]} +{"seq_id": "187278897", "text": "# coding=utf-8\n# Copyright 2023 The Uncertainty Baselines Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is 
distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"ResNet50 model with HetSNGP.\"\"\"\nimport functools\nimport string\n\nimport edward2 as ed\nimport tensorflow as tf\n\n\n# Use batch normalization defaults from Pytorch.\nBATCH_NORM_DECAY = 0.9\nBATCH_NORM_EPSILON = 1e-5\n\n\ndef MonteCarloDropout( # pylint:disable=invalid-name\n inputs,\n dropout_rate,\n use_mc_dropout,\n filterwise_dropout):\n \"\"\"Defines the Monte Carlo dropout layer.\"\"\"\n training = None\n noise_shape = None\n\n if use_mc_dropout:\n training = True\n\n if filterwise_dropout:\n noise_shape = [inputs.shape[0], 1, 1, inputs.shape[3]]\n\n return tf.keras.layers.Dropout(\n dropout_rate, noise_shape=noise_shape)(\n inputs, training=training)\n\n\ndef make_random_feature_initializer(random_feature_type):\n # Use stddev=0.05 to replicate the default behavior of\n # tf.keras.initializer.RandomNormal.\n if random_feature_type == 'orf':\n return ed.initializers.OrthogonalRandomFeatures(stddev=0.05)\n elif random_feature_type == 'rff':\n return tf.keras.initializers.RandomNormal(stddev=0.05)\n else:\n return random_feature_type\n\n\ndef make_conv2d_layer(use_spec_norm,\n spec_norm_iteration,\n spec_norm_bound):\n \"\"\"Defines type of Conv2D layer to use based on spectral normalization.\"\"\"\n Conv2DBase = functools.partial(tf.keras.layers.Conv2D, padding='same') # pylint: disable=invalid-name\n def Conv2DNormed(*conv_args, **conv_kwargs): # pylint: disable=invalid-name\n return ed.layers.SpectralNormalizationConv2D(\n Conv2DBase(*conv_args, **conv_kwargs),\n iteration=spec_norm_iteration,\n norm_multiplier=spec_norm_bound)\n\n return Conv2DNormed if use_spec_norm else Conv2DBase\n\n\ndef bottleneck_block(inputs, filters, stage, block, strides, conv_layer,\n dropout_layer):\n \"\"\"Residual block with 1x1 -> 3x3 -> 1x1 convs in main path.\n\n Note that strides appear in the second conv (3x3) rather than the first (1x1).\n This is also known as \"ResNet v1.5\" as it differs from He et al. 
(2015)\n (http://torch.ch/blog/2016/02/04/resnets.html).\n\n Args:\n inputs: tf.Tensor.\n filters: list of integers, the filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n strides: Strides for the second conv layer in the block.\n conv_layer: tf.keras.layers.Layer.\n dropout_layer: Callable for dropout layer.\n\n Returns:\n tf.Tensor.\n \"\"\"\n filters1, filters2, filters3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = conv_layer(\n filters1,\n kernel_size=1,\n use_bias=False,\n kernel_initializer='he_normal',\n name=conv_name_base + '2a')(\n inputs)\n x = tf.keras.layers.BatchNormalization(\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name=bn_name_base + '2a')(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = dropout_layer(x)\n\n x = conv_layer(\n filters2,\n kernel_size=3,\n strides=strides,\n padding='same',\n use_bias=False,\n kernel_initializer='he_normal',\n name=conv_name_base + '2b')(\n x)\n x = tf.keras.layers.BatchNormalization(\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name=bn_name_base + '2b')(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = dropout_layer(x)\n\n x = conv_layer(\n filters3,\n kernel_size=1,\n use_bias=False,\n kernel_initializer='he_normal',\n name=conv_name_base + '2c')(\n x)\n x = tf.keras.layers.BatchNormalization(\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name=bn_name_base + '2c')(x)\n\n shortcut = inputs\n if not x.shape.is_compatible_with(shortcut.shape):\n shortcut = conv_layer(\n filters3,\n kernel_size=1,\n use_bias=False,\n strides=strides,\n kernel_initializer='he_normal',\n name=conv_name_base + '1')(\n shortcut)\n shortcut = tf.keras.layers.BatchNormalization(\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name=bn_name_base + '1')(shortcut)\n shortcut = dropout_layer(shortcut)\n\n x = tf.keras.layers.add([x, shortcut])\n x = tf.keras.layers.Activation('relu')(x)\n return x\n\n\ndef group(inputs, filters, num_blocks, stage, strides, conv_layer,\n dropout_layer):\n \"\"\"Group of residual blocks.\"\"\"\n blocks = string.ascii_lowercase\n x = bottleneck_block(\n inputs,\n filters,\n stage,\n block=blocks[0],\n strides=strides,\n conv_layer=conv_layer,\n dropout_layer=dropout_layer)\n for i in range(num_blocks - 1):\n x = bottleneck_block(\n x,\n filters,\n stage,\n block=blocks[i + 1],\n strides=1,\n conv_layer=conv_layer,\n dropout_layer=dropout_layer)\n return x\n\n\ndef resnet50_hetsngp_add_last_layer(\n inputs, x, num_classes, num_factors, use_gp_layer, gp_hidden_dim, gp_scale,\n gp_bias, gp_input_normalization, gp_random_feature_type,\n gp_cov_discount_factor, gp_cov_ridge_penalty,\n gp_output_imagenet_initializer, temperature, num_mc_samples, eps,\n sngp_var_weight, het_var_weight):\n \"\"\"Builds ResNet50.\n\n Using strided conv, pooling, four groups of residual blocks, and pooling, the\n network maps spatial features of size 224x224 -> 112x112 -> 56x56 -> 28x28 ->\n 14x14 -> 7x7 (Table 1 of He et al. 
(2015)).\n\n Args:\n inputs: inputs\n x: x\n num_classes: Number of output classes.\n num_factors: Number of factors for the heteroscedastic variance.\n use_gp_layer: Whether to use Gaussian process layer as the output layer.\n gp_hidden_dim: The hidden dimension of the GP layer, which corresponds to\n the number of random features used for the approximation.\n gp_scale: The length-scale parameter for the RBF kernel of the GP layer.\n gp_bias: The bias term for GP layer.\n gp_input_normalization: Whether to normalize the input using LayerNorm for\n GP layer. This is similar to automatic relevance determination (ARD) in\n the classic GP learning.\n gp_random_feature_type: The type of random feature to use for\n `RandomFeatureGaussianProcess`.\n gp_cov_discount_factor: The discount factor to compute the moving average of\n precision matrix.\n gp_cov_ridge_penalty: Ridge penalty parameter for GP posterior covariance.\n gp_output_imagenet_initializer: Whether to initialize GP output layer using\n Gaussian with small standard deviation (sd=0.01).\n temperature: Float or scalar `Tensor` representing the softmax\n temperature.\n num_mc_samples: The number of Monte-Carlo samples used to estimate the\n predictive distribution.\n eps: Float. Clip probabilities into [eps, 1.0] softmax or\n [eps, 1.0 - eps] sigmoid before applying log (softmax), or inverse\n sigmoid.\n sngp_var_weight: Weight in [0,1] for the SNGP variance in the output.\n het_var_weight: Weight in [0,1] for the het. variance in the output.\n\n Returns:\n tf.keras.Model.\n \"\"\"\n x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)\n\n if use_gp_layer:\n gp_output_initializer = None\n if gp_output_imagenet_initializer:\n # Use the same initializer as dense\n gp_output_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)\n output_layer = functools.partial(\n ed.layers.HeteroscedasticSNGPLayer,\n num_factors=num_factors,\n num_inducing=gp_hidden_dim,\n gp_kernel_scale=gp_scale,\n gp_output_bias=gp_bias,\n normalize_input=gp_input_normalization,\n gp_cov_momentum=gp_cov_discount_factor,\n gp_cov_ridge_penalty=gp_cov_ridge_penalty,\n scale_random_features=False,\n use_custom_random_features=True,\n custom_random_features_initializer=make_random_feature_initializer(\n gp_random_feature_type),\n kernel_initializer=gp_output_initializer,\n temperature=temperature,\n train_mc_samples=num_mc_samples,\n test_mc_samples=num_mc_samples,\n share_samples_across_batch=True,\n logits_only=True,\n eps=eps,\n dtype=tf.float32,\n sngp_var_weight=sngp_var_weight,\n het_var_weight=het_var_weight)\n else:\n output_layer = functools.partial(\n tf.keras.layers.Dense,\n activation=None,\n kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),\n name='fc1000')\n\n outputs = output_layer(num_classes)(x)\n return tf.keras.Model(inputs=inputs, outputs=outputs, name='resnet50')\n\n\ndef resnet50_hetsngp(input_shape,\n batch_size,\n num_classes,\n num_factors,\n use_mc_dropout,\n dropout_rate,\n filterwise_dropout,\n use_gp_layer,\n gp_hidden_dim,\n gp_scale,\n gp_bias,\n gp_input_normalization,\n gp_random_feature_type,\n gp_cov_discount_factor,\n gp_cov_ridge_penalty,\n gp_output_imagenet_initializer,\n use_spec_norm,\n spec_norm_iteration,\n spec_norm_bound,\n temperature,\n num_mc_samples=100,\n eps=1e-5,\n sngp_var_weight=1.,\n het_var_weight=1.,\n omit_last_layer=False):\n \"\"\"Builds ResNet50.\n\n Using strided conv, pooling, four groups of residual blocks, and pooling, the\n network maps spatial features of size 224x224 -> 
112x112 -> 56x56 -> 28x28 ->\n 14x14 -> 7x7 (Table 1 of He et al. (2015)).\n\n Args:\n input_shape: Shape tuple of input excluding batch dimension.\n batch_size: The batch size of the input layer. Required by the spectral\n normalization.\n num_classes: Number of output classes.\n num_factors: Number of factors for the heteroscedastic variance.\n use_mc_dropout: Whether to apply Monte Carlo dropout.\n dropout_rate: Dropout rate.\n filterwise_dropout: Dropout whole convolutional filters instead of\n individual values in the feature map.\n use_gp_layer: Whether to use Gaussian process layer as the output layer.\n gp_hidden_dim: The hidden dimension of the GP layer, which corresponds to\n the number of random features used for the approximation.\n gp_scale: The length-scale parameter for the RBF kernel of the GP layer.\n gp_bias: The bias term for GP layer.\n gp_input_normalization: Whether to normalize the input using LayerNorm for\n GP layer. This is similar to automatic relevance determination (ARD) in\n the classic GP learning.\n gp_random_feature_type: The type of random feature to use for\n `RandomFeatureGaussianProcess`.\n gp_cov_discount_factor: The discount factor to compute the moving average of\n precision matrix.\n gp_cov_ridge_penalty: Ridge penalty parameter for GP posterior covariance.\n gp_output_imagenet_initializer: Whether to initialize GP output layer using\n Gaussian with small standard deviation (sd=0.01).\n use_spec_norm: Whether to apply spectral normalization.\n spec_norm_iteration: Number of power iterations to perform for estimating\n the spectral norm of weight matrices.\n spec_norm_bound: Upper bound to spectral norm of weight matrices.\n temperature: Float or scalar `Tensor` representing the softmax\n temperature.\n num_mc_samples: The number of Monte-Carlo samples used to estimate the\n predictive distribution.\n eps: Float. Clip probabilities into [eps, 1.0] softmax or\n [eps, 1.0 - eps] sigmoid before applying log (softmax), or inverse\n sigmoid.\n sngp_var_weight: Weight in [0,1] for the SNGP variance in the output.\n het_var_weight: Weight in [0,1] for the het. variance in the output.\n omit_last_layer: Optional. 
Omits the last pooling layer if it is set to\n True.\n\n Returns:\n tf.keras.Model.\n \"\"\"\n dropout_layer = functools.partial(\n MonteCarloDropout,\n dropout_rate=dropout_rate,\n use_mc_dropout=use_mc_dropout,\n filterwise_dropout=filterwise_dropout)\n conv_layer = make_conv2d_layer(use_spec_norm=use_spec_norm,\n spec_norm_iteration=spec_norm_iteration,\n spec_norm_bound=spec_norm_bound)\n\n inputs = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size)\n x = tf.keras.layers.ZeroPadding2D(padding=3, name='conv1_pad')(inputs)\n # TODO(jereliu): apply SpectralNormalization to input layer as well.\n x = tf.keras.layers.Conv2D(\n 64,\n kernel_size=7,\n strides=2,\n padding='valid',\n use_bias=False,\n kernel_initializer='he_normal',\n name='conv1')(x)\n x = tf.keras.layers.BatchNormalization(\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n name='bn_conv1')(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = dropout_layer(x)\n x = tf.keras.layers.MaxPooling2D(3, strides=2, padding='same')(x)\n\n x = group(\n x, [64, 64, 256],\n stage=2,\n num_blocks=3,\n strides=1,\n conv_layer=conv_layer,\n dropout_layer=dropout_layer)\n x = group(\n x, [128, 128, 512],\n stage=3,\n num_blocks=4,\n strides=2,\n conv_layer=conv_layer,\n dropout_layer=dropout_layer)\n x = group(\n x, [256, 256, 1024],\n stage=4,\n num_blocks=6,\n strides=2,\n conv_layer=conv_layer,\n dropout_layer=dropout_layer)\n x = group(\n x, [512, 512, 2048],\n stage=5,\n num_blocks=3,\n strides=2,\n conv_layer=conv_layer,\n dropout_layer=dropout_layer)\n\n if omit_last_layer:\n return tf.keras.Model(inputs=inputs, outputs=x, name='resnet50')\n\n return resnet50_hetsngp_add_last_layer(\n inputs, x, num_classes, num_factors, use_gp_layer, gp_hidden_dim,\n gp_scale, gp_bias, gp_input_normalization, gp_random_feature_type,\n gp_cov_discount_factor, gp_cov_ridge_penalty,\n gp_output_imagenet_initializer, temperature, num_mc_samples, eps,\n sngp_var_weight, het_var_weight)\n", "sub_path": "uncertainty_baselines/models/resnet50_hetsngp.py", "file_name": "resnet50_hetsngp.py", "file_ext": "py", "file_size_in_byte": 14848, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "tensorflow.keras.layers.Dropout", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 44, "usage_type": "attribute"}, {"api_name": "edward2.initializers.OrthogonalRandomFeatures", "line_number": 53, "usage_type": "call"}, {"api_name": "edward2.initializers", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.initializers.RandomNormal", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 55, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 64, "usage_type": "attribute"}, {"api_name": "edward2.layers.SpectralNormalizationConv2D", "line_number": 66, "usage_type": "call"}, {"api_name": "edward2.layers", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 105, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 109, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 121, 
"usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 125, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 135, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 150, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.add", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 156, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 157, "usage_type": "attribute"}, {"api_name": "string.ascii_lowercase", "line_number": 164, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.GlobalAveragePooling2D", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 230, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.initializers.RandomNormal", "line_number": 236, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 236, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 237, "usage_type": "call"}, {"api_name": "edward2.layers", "line_number": 238, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 257, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 261, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 262, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.initializers.RandomNormal", "line_number": 264, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 264, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Model", "line_number": 268, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 268, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 346, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 355, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 355, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.ZeroPadding2D", "line_number": 356, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 356, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 358, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 358, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 366, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 366, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 370, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 370, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 372, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 372, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Model", "line_number": 404, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 404, "usage_type": "attribute"}]} +{"seq_id": "648860889", "text": "from django.shortcuts import render, 
redirect\nfrom django.contrib import messages\nfrom homepage.models import *\nfrom homepage.myfunction import *\nfrom django.core.mail import send_mail\nfrom homepage.myclass import *\nfrom django.http import JsonResponse\nfrom datetime import datetime\nfrom django.core.paginator import Paginator\n\ndef usernews(request):\n    islog = 0\n    newss = News.objects.all().order_by('-createdate')\n\n    header = Header.objects.get(headername = 'News')\n\n    userdetailnewslist = []\n    for news in newss:\n        userdetail = UserDetail.objects.get(accountid = news.accountid)\n        temp = NewsUserdetail(news, userdetail)\n        userdetailnewslist.append(temp)\n    \n\n    # Pagination\n    # paginator = Paginator(userdetailnewslist, 10) # Show 10 items per page\n    # page = request.GET.get('page')\n    # userdetailnewslist = paginator.get_page(page)\n    \n    \n    if request.session.has_key('username'):\n        username = request.session['username']\n        account = Account.objects.get(username = username)\n        islog = 1\n        context = {\n            'newss': newss,\n            'islog': islog,\n            'username': username,\n            'userdetailnewslist': userdetailnewslist,\n            'account': account,\n            'header': header,\n        }\n        return render(request, 'usernews/usernews.html', context)\n    context = {\n        'newss': newss,\n        'islog': islog,\n        'userdetailnewslist': userdetailnewslist,\n        'header': header,\n    }\n    return render(request, 'usernews/usernews.html', context)\n\ndef usernewsblog(request, id):\n    islog = 0\n    newss = News.objects.all().order_by('createdate')\n    newss = newss[0:10]\n\n    news = News.objects.get(newsid = id)\n\n    newsreplys = NewsReply.objects.filter(newsid = id).order_by('-createdate')\n    lennewsrep = len(newsreplys)\n\n    userdetailnewsreplylist = []\n    for newsreply in newsreplys:\n        newsreply.newsreplyid = 'CommentDelete(' + str(newsreply.newsreplyid) + ')'\n        userdetail = UserDetail.objects.get(accountid = newsreply.accountid)\n        temp = NewsReplyUserdetail(newsreply, userdetail)\n        userdetailnewsreplylist.append(temp)\n\n    if request.session.has_key('username'):\n        islog = 1\n        username = request.session['username']\n        account = Account.objects.get(username = username)\n        context = {\n            'newss': newss,\n            'news': news,\n            'islog': islog,\n            'username': username,\n            'account': account,\n            'newsreplys': newsreplys,\n            'lennewsrep': lennewsrep,\n            'userdetailnewsreplylist': userdetailnewsreplylist,\n        }\n        return render(request, 'usernews/usernewsblog.html', context)\n\n    context = {\n        'newss': newss,\n        'news': news,\n        'islog': islog,\n        'newsreplys': newsreplys,\n        'lennewsrep': lennewsrep,\n        'userdetailnewsreplylist': userdetailnewsreplylist,\n    }\n    return render(request, 'usernews/usernewsblog.html', context)\n\n# def usernewspost(request):\n#     islog = 0\n#     if request.session.has_key('username'):\n#         islog = 1\n#         account = Account.objects.get(username = request.session['username'])\n    \n#         if account.accounttypeid.accounttypeid == 1:\n#             enviromentcates = EnviromentCate.objects.all()\n#             if request.method == \"POST\":\n#                 enviromentcateid = request.POST.get('enviromentcateid')\n#                 newsname = request.POST.get('newsname')\n#                 content = request.POST.get('content')\n#                 description = request.POST.get('description')\n#                 try:\n#                     avatar = request.FILES.get('avatar')\n#                 except:\n#                     avatar = None\n#                 if avatar != None:\n#                     ava = tokenFile(avatar)\n#                 else:\n#                     ava = ''\n#                 note = request.POST.get('note')\n\n#                 news = News(\n#                     enviromentcateid = EnviromentCate.objects.get(enviromentcateid = enviromentcateid),\n#                     accountid = account,\n#                     newsname = newsname,\n#                     description = description,\n#                     content = content,\n#                     createdate = datetime.now(),\n#                     
editdate = datetime.now(),\n#                     avatar = ava,\n#                     isenable = 1,\n#                     note = note,\n#                     )\n#                 news.save()\n#                 context = {\n#                     'islog': islog,\n#                     'account': account,\n#                     'enviromentcates': enviromentcates,\n#                 }\n#                 return redirect('usernews:usernews')\n#             else:\n#                 context = {\n#                     'islog': islog,\n#                     'account':account,\n#                     'enviromentcates': enviromentcates,\n#                 }\n#                 return render(request, 'usernews/usernewspost.html', context)    \n#         else:\n#             return redirect('homepage:index')\n\n        \n#     else: \n#         return redirect('homepage:index')\n\ndef myusernewspost(request, id):\n    islog = 0\n    if request.session.has_key('username'):\n        islog = 1\n        account = Account.objects.get(username = request.session['username'])\n        enviromentcates = EnviromentCate.objects.all()\n        \n        # header = Header.objects.get(headername = 'News')\n\n        if request.method == \"POST\":\n            if request.POST.get('enviromentcateid') == None or request.POST.get('enviromentcateid') == ' ':\n                enviromentcateid = None\n            else:\n                enviromentcateid = EnviromentCate.objects.get(enviromentcateid = request.POST.get('enviromentcateid'))\n            description = request.POST.get('description')\n\n            newsname = request.POST.get('newsname')\n            content = request.POST.get('content')\n            try:\n                avatar = request.FILES.get('avatar')\n            except:\n                avatar = None\n            if avatar != None:\n                ava = tokenFile(avatar)\n            else:\n                ava = ''\n            note = request.POST.get('note')\n\n            news = News(\n                enviromentcateid = enviromentcateid,\n                accountid = account,\n                newsname = newsname,\n                description = description,\n                content = content,\n                createdate = datetime.now(),\n                editdate = datetime.now(),\n                avatar = ava,\n                isenable = 1,\n                note = note,\n            )\n            news.save()\n            \n            return redirect('usernews:myusernews', account.accountid)\n\n        context = {\n            'islog': islog,\n            'account':account,\n            'enviromentcates': enviromentcates,\n        }\n        return render(request, 'usernews/myusernewspost.html', context)\n    else: \n        return redirect('homepage:index')\n\ndef myusernews(request, id):\n    islog = 0\n    enviromentcates = EnviromentCate.objects.all()    \n\n    header = Header.objects.get(headername = 'News')    \n\n    if request.session.has_key('username'):\n        islog = 1\n        account = Account.objects.get(username = request.session['username'])\n        newss = News.objects.filter(accountid = account.accountid).order_by('-createdate')\n\n        userdetailnewslist = []\n        for news in newss:\n            # news.newsid = 'NewsDelete(' + str(news.newsid) + ')'\n            userdetail = UserDetail.objects.get(accountid = news.accountid)\n            temp = NewsUserdetail(news, userdetail)\n            userdetailnewslist.append(temp)\n\n        # Controls how the forum table is displayed when the user is logged in\n        classdiv = \"col-lg-9\"\n\n        context = {\n            'islog': islog,\n            'account':account,\n            'enviromentcates': enviromentcates,\n            'newss': newss,\n            'userdetailnewslist': userdetailnewslist,\n            'classdiv': classdiv,\n            'header': header,\n        }\n        return render(request, 'usernews/myusernews.html', context)\n\n    return redirect('homepage:index')\n\ndef myusernewsedit(request, idacc, idnew):\n    islog = 0\n    if request.session.has_key('username'):\n        islog = 1\n        news = News.objects.get(newsid = idnew)\n        account = Account.objects.get(username = request.session['username'])\n        enviromentcates = EnviromentCate.objects.all()\n        \n        if request.method == \"POST\":\n            if request.POST.get('enviromentcateid') == None or request.POST.get('enviromentcateid') == ' ':\n                enviromentcateid = None\n            else:\n                enviromentcateid = EnviromentCate.objects.get(enviromentcateid = request.POST.get('enviromentcateid'))\n            newsname = request.POST.get('newsname')\n            description = request.POST.get('description')\n            
content = request.POST.get('content')\n            try:\n                avatar = request.FILES.get('avatar')\n            except:\n                avatar = None\n            if avatar != None:\n                ava = tokenFile(avatar)\n            else:\n                ava = ''\n            note = request.POST.get('note')\n\n            \n            # if(enviromentcateid != forum.enviromentcateid.enviromentcateid):\n            news = News.objects.filter(newsid = idnew).update(enviromentcateid = enviromentcateid)\n            news = News.objects.filter(newsid = idnew).update(newsname = newsname)\n            news = News.objects.filter(newsid = idnew).update(editdate = datetime.now())\n            news = News.objects.filter(newsid = idnew).update(description = description)\n            news = News.objects.filter(newsid = idnew).update(content = content)\n            news = News.objects.filter(newsid = idnew).update(note = note)\n            \n            \n            \n            return redirect('usernews:myusernews', account.accountid)\n\n        context = {\n            'islog': islog,\n            'account':account,\n            'enviromentcates': enviromentcates,\n            'news': news,\n        }\n        return render(request, 'usernews/myusernewsedit.html', context)\n    else: \n        return redirect('homepage:index')", "sub_path": "usernews/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 10267, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 82, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 184, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 184, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 185, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 185, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 192, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 199, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 201, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 233, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 235, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 267, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 267, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 274, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 282, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 284, "usage_type": "call"}]} +{"seq_id": "142160337", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Keisuke Okumura\n\nfrom math import sin, cos, pi, fabs\nimport pygame\nfrom pygame.locals import *\nfrom spectrum import Spectrum\n\n\nclass Pydraw(Spectrum):\n    \"\"\"Extends Spectrum and adds drawing functions.\n    \"\"\"\n\n    def __init__(self, screen_width, screen_height):\n        super(Pydraw, self).__init__(screen_width, screen_height)\n        self.bar = 20\n        self.rad = 200\n        self.vol = 0\n        pygame.display.set_caption(\"Audio Visualizer\")\n\n    def draw(self, pr):\n        self.manage_event()\n        self.screen.fill((0, 0, 0))\n        dw = self.SCREEN_SIZE[0] / 64\n        black = 0\n        points = []\n        center_w = self.SCREEN_SIZE[0] / 2\n        center_h = self.SCREEN_SIZE[1] / 2\n        for i, val in enumerate(pr):\n            self.screen.fill((black, black, black),\n                             Rect(i*dw,\n                                  self.SCREEN_SIZE[1] - val 
* self.bar,\n                                  dw, val * self.bar))\n            self.screen.fill((black, black, black),\n                             Rect(self.SCREEN_SIZE[0] - (i+1)*dw, 0,\n                                  dw, val * self.bar))\n            black += 4\n            points.append((\n                int(center_w+sin((i/64.)*pi*2)*(self.rad+self.bar*val)),\n                int(center_h+cos((i/64.)*pi*2)*(self.rad+self.bar*val))\n            ))\n        if fabs(self.vol - sum(pr)/3) < 10:\n            pass\n        else:\n            self.vol = sum(pr)/3\n        pygame.draw.circle(self.screen, (20, 20, 20), (center_w, center_h),\n                           self.vol)\n        pygame.draw.aalines(self.screen, (255, 255, 255), True, points, 2)\n        pygame.display.update()\n\n\nif __name__ == '__main__':\n    screen_width = 1280\n    screen_height = 1024\n    spe = Pydraw(screen_width, screen_height)\n    spe.record()\n", "sub_path": "visualize01.py", "file_name": "visualize01.py", "file_ext": "py", "file_size_in_byte": 1873, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "spectrum.Spectrum", "line_number": 12, "usage_type": "name"}, {"api_name": "pygame.display.set_caption", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 21, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 41, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 41, "usage_type": "name"}, {"api_name": "math.cos", "line_number": 42, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 42, "usage_type": "name"}, {"api_name": "math.fabs", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.draw.aalines", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "526780218", "text": "import itertools\n\n# Input value\ninput_num = input()\n\nt = 1\ncounter = 0\nresult = 0\n\nwhile (counter < int(input_num)):\n    \n    head_num = []\n    tail_num = []\n    \n    # Split the base value into two parts\n    original_t = ((t - 1) / 2)\n    head_num.append(original_t)\n    tail_num.append(original_t + 1)\n    \n    # Compute for odd numbers only\n    for num in range(1, int(t) + 1, 2):\n\n        # Find the odd divisors\n        if t%num == 0:\n            middle_pos = ((num - 1) / 2) + 1\n            middle_num = t / num\n            head_num.append(middle_num - (num - middle_pos))\n            \n            if head_num[-1] < 0:\n                # If the minimum is negative, cancel it out\n                head_num[-1] = (-1 * head_num[-1]) + 1\n            \n            # Append the maximum value    \n            tail_num.append(middle_num + (num - middle_pos))\n\n    \n    # Brute-force check whether any head/tail pair yields a run of consecutive integers\n    for head, tail in itertools.product(head_num, tail_num):\n        if head== (tail + 1):\n            counter+=1\n            break\n    \n    t+=1\n    head_num.clear()\n    tail_num.clear()\n    \n# For the input value n, print the n-th number expressible as a sum of consecutive integers\nprint(t - 1)", "sub_path": "sum.py", "file_name": "sum.py", "file_ext": "py", "file_size_in_byte": 1210, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "itertools.product", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "612170339", "text": "# Copyright 2014 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport os\nimport unittest\n\nimport six\n\nfrom telemetry import decorators\nfrom telemetry.core import util\n\nfrom core import find_dependencies\n\n\nclass FindDependenciesTest(unittest.TestCase):\n  def getErroneousDependencies(self):\n    # For some reason, several erroneous dependencies are reported, but only\n    # when running under Python 3. The output from the discovery process does\n    # not seem to indicate that anything is actually depending on these, nor\n    # do the files themselves import anything other than built-ins and files\n    # within dependency_test_dir, so it is unclear why this is happening.\n    if six.PY2:\n      return set()\n    else:\n      chromium_src_dir = os.path.realpath(\n          os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..',\n                       '..'))\n      return {\n          os.path.join(chromium_src_dir, '-'),\n          os.path.join(chromium_src_dir, 'build', 'android', 'java'),\n          os.path.join(chromium_src_dir, 'build', 'android', 'test'),\n          os.path.join(chromium_src_dir, 'third_party', 'catapult',\n                       'third_party', 'coverage', '__main__.py'),\n      }\n\n  @decorators.Disabled('chromeos') # crbug.com/818230\n  def testFindPythonDependencies(self):\n    try:\n      dog_object_path = os.path.join(\n          util.GetUnittestDataDir(),\n          'dependency_test_dir', 'dog', 'dog', 'dog_object.py')\n      cat_module_path = os.path.join(\n          util.GetUnittestDataDir(),\n          'dependency_test_dir', 'other_animals', 'cat', 'cat')\n      cat_module_init_path = os.path.join(cat_module_path, '__init__.py')\n      cat_object_path = os.path.join(cat_module_path, 'cat_object.py')\n      dependencies = set(\n          p for p in find_dependencies.FindPythonDependencies(dog_object_path))\n      dependencies -= self.getErroneousDependencies()\n      self.assertEquals(dependencies, {\n          dog_object_path, cat_module_path, cat_module_init_path,\n          cat_object_path\n      })\n    except ImportError: # crbug.com/559527\n      pass\n\n  @decorators.Disabled('chromeos') # crbug.com/818230\n  def testFindPythonDependenciesWithNestedImport(self):\n    try:\n      moose_module_path = os.path.join(\n          util.GetUnittestDataDir(),\n          'dependency_test_dir', 'other_animals', 'moose', 'moose')\n      moose_object_path = os.path.join(moose_module_path, 'moose_object.py')\n      horn_module_path = os.path.join(moose_module_path, 'horn')\n      horn_module_init_path = os.path.join(horn_module_path, '__init__.py')\n      horn_object_path = os.path.join(horn_module_path, 'horn_object.py')\n      self.assertEquals(\n          set(p for p in\n              find_dependencies.FindPythonDependencies(moose_object_path)),\n          {moose_object_path,\n           horn_module_path, horn_module_init_path, horn_object_path})\n    except ImportError: # crbug.com/559527\n      pass\n", "sub_path": "tools/perf/core/find_dependencies_unittest.py", "file_name": "find_dependencies_unittest.py", "file_ext": "py", "file_size_in_byte": 3040, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "unittest.TestCase", "line_number": 16, "usage_type": "attribute"}, {"api_name": "six.PY2", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 27, 
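The test above exercises `find_dependencies.FindPythonDependencies`, which lives inside Chromium's `tools/perf` tree and is not importable outside it. For experimenting with the same idea in isolation, the standard library's `modulefinder` can statically walk a script's import graph. This is a rough stand-in for the technique, not the Chromium implementation:

```python
# Rough, self-contained approximation of Python dependency discovery using
# the stdlib's modulefinder. This is NOT Chromium's FindPythonDependencies;
# it returns the file paths of modules a script statically imports.
from modulefinder import ModuleFinder


def find_python_dependencies(script_path):
    finder = ModuleFinder()
    finder.run_script(script_path)  # statically follows import statements
    # Built-in/frozen modules have no __file__, so filter them out.
    return {mod.__file__ for mod in finder.modules.values() if mod.__file__}


if __name__ == '__main__':
    import sys
    # Analyze this script itself as a quick smoke test.
    for path in sorted(find_python_dependencies(sys.argv[0])):
        print(path)
```

Unlike the path-set semantics tested above (which also include package directories), `modulefinder` reports module files only, so results are comparable but not identical.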
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "telemetry.core.util.GetUnittestDataDir", "line_number": 41, "usage_type": "call"}, {"api_name": "telemetry.core.util", "line_number": 41, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "telemetry.core.util.GetUnittestDataDir", "line_number": 44, "usage_type": "call"}, {"api_name": "telemetry.core.util", "line_number": 44, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "core.find_dependencies.FindPythonDependencies", "line_number": 49, "usage_type": "call"}, {"api_name": "core.find_dependencies", "line_number": 49, "usage_type": "name"}, {"api_name": "telemetry.decorators.Disabled", "line_number": 37, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "telemetry.core.util.GetUnittestDataDir", "line_number": 62, "usage_type": "call"}, {"api_name": "telemetry.core.util", "line_number": 62, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "core.find_dependencies.FindPythonDependencies", "line_number": 70, "usage_type": "call"}, {"api_name": "core.find_dependencies", "line_number": 70, "usage_type": "name"}, {"api_name": "telemetry.decorators.Disabled", "line_number": 58, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "113420188", "text": "from flask import Flask, Blueprint, abort\nfrom flask.views import MethodView\nfrom application.api.utils import register_api\n\n\ndef test_register_api():\n app = Flask(__name__)\n\n class BarAPI(MethodView):\n def get(self, bar_id):\n if bar_id == 'me':\n return \"me\", 200\n if bar_id is None:\n return 'bars', 200\n else:\n # expose a single user\n return f'single bar {bar_id}', 200\n\n def post(self):\n return 
'post', 200\n\n def put(self, bar_id):\n abort(404)\n\n def delete(self, bar_id):\n abort(404)\n\n api = Blueprint('some_api', __name__)\n register_api(api, BarAPI, 'bar_api', '/bars', pk='bar_id', pk_me=True)\n app.register_blueprint(api, url_prefix='/some-api')\n test_client = app.test_client()\n\n rv = test_client.get('/some-api/bars')\n assert rv.data == b'bars'\n assert rv.status_code == 200\n\n rv = test_client.get('/some-api/bars/me')\n assert rv.data == b'me'\n assert rv.status_code == 200\n\n rv = test_client.get('/some-api/bars/123')\n assert rv.data == b'single bar 123'\n assert rv.status_code == 200\n\n rv = test_client.post('/some-api/bars')\n assert rv.data == b'post'\n assert rv.status_code == 200\n\n rv = test_client.put('/some-api/bars/123')\n assert rv.status_code == 404\n\n rv = test_client.delete('/some-api/bars/123')\n assert rv.status_code == 404\n", "sub_path": "tests/application/api/utils/test_register_api.py", "file_name": "test_register_api.py", "file_ext": "py", "file_size_in_byte": 1465, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.views.MethodView", "line_number": 9, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.Blueprint", "line_number": 28, "usage_type": "call"}, {"api_name": "application.api.utils.register_api", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "562726410", "text": "import os\nimport numpy as np\nimport pandas as pd\nimport collections\nfrom scipy import sparse\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\n\nimport keras\nfrom keras import optimizers\nfrom keras.models import Model\nfrom keras.layers import Input, Reshape, Dense, Dropout, LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\nnp.random.seed(7)\n\nre_generate = False\n\nfeature_dir = '../data/features/'\ndatadir = '../data/'\n\nensemble_dir = os.path.join(datadir, 'ensemble')\nif not os.path.exists(ensemble_dir):\n os.makedirs(ensemble_dir)\n\nprint ('loading features')\ndf = pd.read_csv(os.path.join(feature_dir, 'df_feature_stage2.csv'))\n\ndf_tr = pd.read_csv(datadir + 'training/data_train.csv'\n , header=None, names=['country', 'sku_id', 'title', 'category_lvl_1',\n 'category_lvl_2', 'category_lvl_3', 'short_description', 'price',\n 'product_type'])\n\ndf_valid = pd.read_csv(datadir + 'testing/data_test.csv'\n , header=None, names=['country', 'sku_id', 'title', 'category_lvl_1',\n 'category_lvl_2', 'category_lvl_3', 'short_description', 'price',\n 'product_type'])\n\nsparse_tfidf_title_clarity = sparse.load_npz(os.path.join(feature_dir, 'sparse_clarity.npz'))\nsparse_tfidf_title_conciseness = sparse.load_npz(os.path.join(feature_dir, 'sparse_conciseness.npz'))\n\ntr_clarity = pd.read_csv(datadir + 'training/clarity_train.labels', header=None)\ntr_conciseness = pd.read_csv(datadir + 'training/conciseness_train.labels', header=None)\n\nfeatures = ['my'\n , 'ph'\n , 'sg'\n , 'NA'\n , 'international'\n , 'local'\n , 'title_wordnum'\n , 'wordnum_q1'\n , 'wordnum_q2'\n , 'wordnum_q3'\n , 'title_stopsnum'\n , 'title_word_duplicate_cat_num'\n , 
'wordnum_q4'\n , 'wordnum_q5'\n , 'title_nonalphanum'\n , 'title_word_duplicate_num'\n , 'title_word_duplicate_nums'\n , 'title_charnum'\n , 'title_avgwordlen'\n , 'title_nonengnum'\n , 'title_wordsynsetdepthsum'\n , 'price'\n , 'title_ent_cat_list_num_mean'\n , 'title_ent_cat_list_num_std'\n , 'title_ent_cat_list_num_max'\n , 'title_ent_cat_list_num'\n , 'title_wordlemmassnum'\n , 'category_lvl_1_frequency'\n , 'category_lvl_2_frequency'\n , 'category_lvl_3_frequency'\n , 'title_wordcharlargenum'\n , 'title_uppernum'\n , 'title_cat3_wordnum_mean_contrast'\n , 'title_C_upperwordratio'\n , 'title_meaningword_ratio'\n , 'title_type_check_num'\n\n , 'title_word_lcs_num'\n , 'title_word_lcs_cat_num'\n , 'title_digitnum'\n , 'title_word_duplicate_num2'\n , 'tittle_upper_word'\n , 'tittle_small_upper_word'\n , 'title_word_duplicate_num_cleaned'\n , 'title_word_duplicate_cat_num_cleaned'\n , 'description_C_upperratio'\n , 'description_C_upperwordratio'\n , 'description_nonalphanum'\n , 'description_li_num'\n\n , 'clarity_prob'\n # , 'clarity_prob_2'\n , 'clarity_prob_lgb'\n , 'clarity_prob_rf'\n , 'clarity_prob_dart'\n , 'clarity_prob_ada'\n # , 'clarity_prob_b'\n # , 'clarity_prob_lgb_b'\n # , 'clarity_prob_rf_b'\n # , 'clarity_prob_dart_b'\n # , 'clarity_prob_ada_b'\n # , 'clarity_prob_knn_b'\n # , 'clarity_prob_svm_b'\n # , 'clarity_prob_nb_b'\n # , 'clarity_prob_mlp_b'\n\n , 'conciseness_prob'\n # , 'conciseness_prob_2'\n , 'conciseness_prob_lgb'\n , 'conciseness_prob_rf'\n , 'conciseness_prob_dart'\n , 'conciseness_prob_ada'\n # , 'conciseness_prob_b'\n # , 'conciseness_prob_lgb_b'\n # , 'conciseness_prob_rf_b'\n # , 'conciseness_prob_dart_b'\n # , 'conciseness_prob_ada_b'\n # , 'conciseness_prob_knn_b'\n # , 'conciseness_prob_svm_b'\n # , 'conciseness_prob_nb_b'\n # , 'conciseness_prob_mlp_b'\n ]\n\n\nmin_max_scaler = preprocessing.MinMaxScaler()\n\ndf = df.fillna(-1)\ndf_scale = preprocessing.scale(df[features].values)\ndf_scale = pd.DataFrame(df_scale, columns=features)\n\ndf_tr_n = df_scale.iloc[:df_tr.shape[0]]\ndf_valid_n = df_scale.iloc[df_tr.shape[0]:]\n\nX_train_f = df_tr_n[features].values\nX_valid_f = df_valid_n[features].values\n\nprint (X_train_f.shape)\nprint(\"preparing word embedding\")\nif re_generate:\n words_list, words_list_tr, words_list_valid = [], [], []\n\n for i in range(df_tr.shape[0]):\n words_list.append(df_tr.iloc[i]['title'])\n words_list_tr.append(df_tr.iloc[i]['title'])\n\n for i in range(df_valid.shape[0]):\n words_list.append(df_valid.iloc[i]['title'])\n words_list_valid.append(df_valid.iloc[i]['title'])\n\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(words_list)\n\n tr_sequences = tokenizer.texts_to_sequences(words_list_tr)\n te_sequences = tokenizer.texts_to_sequences(words_list_valid)\n\n word_index = tokenizer.word_index\n max_review_length = 40\n\n X_train = pad_sequences(tr_sequences, maxlen=max_review_length)\n y_train = tr_conciseness.values.reshape((tr_conciseness.shape[0], 1))\n # y_train = tr_clarity.values.reshape((tr_conciseness.shape[0], 1))\n\n X_valid = pad_sequences(te_sequences, maxlen=max_review_length)\n\n nb_words = len(word_index)+1\n\n EMBEDDING_FILE = datadir + 'file_temp/glove.840B.300d.txt'\n\n embeddings_index = {}\n f = open(EMBEDDING_FILE)\n count = 0\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n embedding_matrix = np.zeros((nb_words, 300))\n\n for word, i in word_index.items():\n embedding_vector = 
embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\n print(\"preparing category embedding\")\n np.save(datadir + 'file_temp/embedding', embedding_matrix)\nelse:\n words_list, words_list_tr, words_list_valid = [], [], []\n\n for i in range(df_tr.shape[0]):\n words_list.append(df_tr.iloc[i]['title'])\n words_list_tr.append(df_tr.iloc[i]['title'])\n\n for i in range(df_valid.shape[0]):\n words_list.append(df_valid.iloc[i]['title'])\n words_list_valid.append(df_valid.iloc[i]['title'])\n\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(words_list)\n\n tr_sequences = tokenizer.texts_to_sequences(words_list_tr)\n te_sequences = tokenizer.texts_to_sequences(words_list_valid)\n\n word_index = tokenizer.word_index\n max_review_length = 40\n nb_words = len(word_index)+1\n\n X_train = pad_sequences(tr_sequences, maxlen=max_review_length)\n y_train = tr_conciseness.values.reshape((tr_conciseness.shape[0], 1))\n # y_train = tr_clarity.values.reshape((tr_conciseness.shape[0], 1))\n\n X_valid = pad_sequences(te_sequences, maxlen=max_review_length)\n\n embedding_matrix = np.load(datadir + 'file_temp/embedding.npy')\n\n\ndef creat_cats(cat_list, df, cat_string):\n for i in range(df.shape[0]):\n cat_list.append(df[cat_string].iloc[i])\n return cat_list\n\ncat_strings = ['sku_id', 'category_lvl_1', 'category_lvl_2', 'category_lvl_3']\n\ncat_to_ix_lens = []\nfor cat_string in cat_strings:\n cats_list = []\n cats_list = creat_cats(cats_list, df_tr, cat_string)\n cats_list = creat_cats(cats_list, df_valid, cat_string)\n\n cats_count = collections.Counter(cats_list)\n\n cat_to_ix = {cat: i for i, cat in enumerate(cats_count.keys())}\n cat_to_ix_lens.append(len(cat_to_ix))\n X_train_cat, X_valid_cat = [], []\n\n for i in range(df_tr.shape[0]):\n idxs = [cat_to_ix[df_tr[cat_string].iloc[i]]]\n X_train_cat.append(idxs)\n\n for i in range(df_valid.shape[0]):\n idxs = [cat_to_ix[df_valid[cat_string].iloc[i]]]\n X_valid_cat.append(idxs)\n\n X_train_cat = np.asarray(X_train_cat)\n X_valid_cat = np.asarray(X_valid_cat)\n\n max_review_cat_length = 1\n X_train_cat = sequence.pad_sequences(X_train_cat, maxlen=max_review_cat_length)\n X_train = np.hstack([X_train, X_train_cat])\n X_valid_cat = sequence.pad_sequences(X_valid_cat, maxlen=max_review_cat_length)\n X_valid = np.hstack([X_valid, X_valid_cat])\n\nX_train = np.hstack([X_train, X_train_f])\nX_valid = np.hstack([X_valid, X_valid_f])\n\nX_train_split, X_test_split, y_train_split, y_test_split = train_test_split(X_train, y_train, test_size=0.13, random_state=41)\n\nmain_input = Input(shape=(max_review_length,), dtype='int32', name='main_input')\n\nEmbedding_layer = Embedding(nb_words, 300, weights=[embedding_matrix], input_length=max_review_length, trainable=True)\nword_embedding = Embedding_layer(main_input)\n\nauxiliary_input_sku = Input(shape=(max_review_cat_length,), dtype='int32', name='aux_input_sku')\nsku_embedding = Embedding(output_dim=16, input_dim=cat_to_ix_lens[0], input_length=max_review_cat_length)\\\n (auxiliary_input_sku)\n\nauxiliary_input_cat1 = Input(shape=(max_review_cat_length,), dtype='int32', name='aux_input_cat1')\ncat1_emdedding = Embedding(output_dim=4, input_dim=cat_to_ix_lens[1], input_length=max_review_cat_length)\\\n (auxiliary_input_cat1)\n\nauxiliary_input_cat2 = Input(shape=(max_review_cat_length,), dtype='int32', name='aux_input_cat2')\ncat2_emdedding = Embedding(output_dim=4, 
input_dim=cat_to_ix_lens[2], input_length=max_review_cat_length)\\\n (auxiliary_input_cat2)\n\nauxiliary_input_cat3 = Input(shape=(max_review_cat_length,), dtype='int32', name='aux_input_cat3')\ncat3_emdedding = Embedding(output_dim=8, input_dim=cat_to_ix_lens[3], input_length=max_review_cat_length)\\\n (auxiliary_input_cat3)\n\nauxiliary_input_numeric = Input(shape=(X_train_f.shape[1],), dtype='float32', name='aux_input_numeric')\n\n\nsku_embedding = Reshape((16, ))(sku_embedding)\ncat1_emdedding = Reshape((4, ))(cat1_emdedding)\ncat2_emdedding = Reshape((4, ))(cat2_emdedding)\ncat3_emdedding = Reshape((8, ))(cat3_emdedding)\n\ncat_emdedding = keras.layers.concatenate([sku_embedding, cat1_emdedding, cat2_emdedding, cat3_emdedding], axis=1)\n\nlstm_out = LSTM(64, recurrent_dropout=0.1, return_sequences=True)(word_embedding)\nlstm_out = LSTM(64)(lstm_out)\n\nmlp1 = Dense(64, activation='relu')(auxiliary_input_numeric)\nmlp2 = Dense(128, activation='relu')(mlp1)\nmlp3 = Dense(192, activation='relu')(mlp2)\nmlp3 = Dropout(0.1)(mlp3)\n\nx = keras.layers.concatenate([lstm_out, cat_emdedding, mlp3])\n\nx = Dense(128, activation='relu')(x)\nx = Dense(128, activation='relu')(x)\nx = Dropout(0.1)(x)\n\nmain_output = Dense(1, activation='sigmoid', name='main_output')(x)\n\nmodel = Model(inputs=[main_input, auxiliary_input_sku, auxiliary_input_cat1, auxiliary_input_cat2,\n auxiliary_input_cat3, auxiliary_input_numeric],\n outputs=[main_output])\n\noptim = optimizers.RMSprop(0.0005)\nmodel.compile(optimizer=optim, loss='binary_crossentropy', metrics=['mse'])\n\nbst_model_path = datadir+'file_temp/best_tmp.h5'\n\nearly_stopping = EarlyStopping(monitor='val_loss', patience=2)\nmodel_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True)\n\n# hist = model.fit([X_train_split[:, :max_review_length], X_train_split[:, max_review_length],\n# X_train_split[:, max_review_length+1],X_train_split[:, max_review_length+2],\n# X_train_split[:, max_review_length+3], X_train_split[:, max_review_length+4:]], [y_train_split],\n# validation_data=[[X_test_split[:, :max_review_length], X_test_split[:, max_review_length],\n# X_test_split[:, max_review_length+1], X_test_split[:, max_review_length+2],\n# X_test_split[:, max_review_length+3], X_test_split[:, max_review_length+4:]],\n# [y_test_split]], epochs=7, verbose=2, batch_size=24, callbacks=[early_stopping, model_checkpoint])\n\n# model.load_weights(bst_model_path)\n# bst_val_score = min(hist.history['val_loss'])\n\nmodel.fit([X_train[:, :max_review_length], X_train[:, max_review_length], X_train[:, max_review_length+1],\n X_train[:, max_review_length+2], X_train[:, max_review_length+3],\n X_train[:, max_review_length+4:]], [y_train], epochs=3, verbose=2, batch_size=24)\n\nvalid_predict = model.predict([X_valid[:, :max_review_length], X_valid[:, max_review_length],\n X_valid[:, max_review_length + 1], X_valid[:, max_review_length+2], X_valid[:, max_review_length+3],\n X_valid[:, max_review_length+4:]], batch_size=24)\n\nnp.savetxt(os.path.join(ensemble_dir, 'conciseness_test_lstm.predict'),\n valid_predict, fmt='%.6f')\n", "sub_path": "model/model_rnn.py", "file_name": "model_rnn.py", "file_ext": "py", "file_size_in_byte": 13457, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.random.seed", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, 
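The `model_rnn.py` script above wires three kinds of inputs into one Keras functional model: an LSTM branch over padded title tokens, Reshape-flattened entity embeddings for the sku and category ids, and an MLP branch over the scaled numeric features, all concatenated into a single sigmoid head. A stripped-down sketch of that multi-input pattern, with toy vocabulary sizes and layer widths rather than the script's real dimensions:

```python
# Minimal sketch of the multi-input Keras functional-API pattern used above.
# Shapes and vocabulary sizes are toy values, not the script's real settings.
from keras.layers import Input, Embedding, LSTM, Reshape, Dense, concatenate
from keras.models import Model

text_in = Input(shape=(40,), dtype='int32')        # padded token ids
text_emb = Embedding(input_dim=10000, output_dim=64)(text_in)
text_feat = LSTM(32)(text_emb)                     # sequence branch

cat_in = Input(shape=(1,), dtype='int32')          # one categorical id
cat_feat = Reshape((8,))(Embedding(input_dim=500, output_dim=8)(cat_in))

num_in = Input(shape=(20,), dtype='float32')       # scaled numeric features
num_feat = Dense(32, activation='relu')(num_in)    # MLP branch

merged = concatenate([text_feat, cat_feat, num_feat])
out = Dense(1, activation='sigmoid')(merged)       # binary target

model = Model(inputs=[text_in, cat_in, num_in], outputs=out)
model.compile(optimizer='rmsprop', loss='binary_crossentropy')
model.summary()
```

The entity-embedding-plus-Reshape step mirrors what the script does for each of `sku_id` and the three category levels before concatenating them with the LSTM and MLP outputs.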
"usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 38, "usage_type": "call"}, {"api_name": "scipy.sparse.load_npz", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 43, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "scipy.sparse.load_npz", "line_number": 44, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 44, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 133, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 133, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.scale", "line_number": 136, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 136, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 137, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 158, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 167, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 196, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 208, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 218, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 224, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 255, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 258, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 258, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 259, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 260, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 260, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.hstack", 
"line_number": 263, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 264, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 266, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 268, "usage_type": "call"}, {"api_name": "keras.layers.embeddings.Embedding", "line_number": 270, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 273, "usage_type": "call"}, {"api_name": "keras.layers.embeddings.Embedding", "line_number": 274, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 277, "usage_type": "call"}, {"api_name": "keras.layers.embeddings.Embedding", "line_number": 278, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 281, "usage_type": "call"}, {"api_name": "keras.layers.embeddings.Embedding", "line_number": 282, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 285, "usage_type": "call"}, {"api_name": "keras.layers.embeddings.Embedding", "line_number": 286, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 289, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 292, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 293, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 294, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 295, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", "line_number": 297, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 297, "usage_type": "attribute"}, {"api_name": "keras.layers.LSTM", "line_number": 299, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 300, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 302, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 303, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 304, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 305, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", "line_number": 307, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 307, "usage_type": "attribute"}, {"api_name": "keras.layers.Dense", "line_number": 309, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 310, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 311, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 313, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 315, "usage_type": "call"}, {"api_name": "keras.optimizers.RMSprop", "line_number": 319, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 319, "usage_type": "name"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 324, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 346, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 346, "usage_type": "call"}, {"api_name": "os.path", "line_number": 346, "usage_type": "attribute"}]} +{"seq_id": "360842765", "text": "import sys\nimport os\nimport time\nimport re\nimport json\nimport ast\nimport shutil\nimport subprocess\nimport shlex\nfrom collections import defaultdict\n\n \n\ndef wc_L(s_File):\n shell_CMD_1 = ''.join(('more ',s_File))\n shell_CMD_2 = ''.join(('wc -l'))\n p1 = subprocess.Popen(shlex.split(shell_CMD_1), 
stdout=subprocess.PIPE)\n p2 = subprocess.Popen(shlex.split(shell_CMD_2), stdin=p1.stdout, stdout=subprocess.PIPE)\n p1.wait()\n p2.wait()\n shell_Results = p2.communicate()[0]\n return(int(shell_Results.decode().strip('\\n')))\n\n#s_File = '/opt/py/JSON/show_lpts.txt'\n#wc = wc_L(s_File)\n\n\ndef wc_W(s_File):\n fd_s = open(s_File,'r+')\n wc = wc_L(s_File)\n max_Wid = 0\n for i in range(1,wc+1):\n line = fd_s.readline()\n pureCMD = line.strip('\\n')\n kw_Len = pureCMD.split(' ').__len__()\n if(max_Wid < kw_Len):\n max_Wid = kw_Len\n return(max_Wid)\n\n\ndef pipe_Shell_CMD(shell_CMDs,d_File):\n fd = open(d_File,'w+')\n len = shell_CMDs.__len__()\n p = {}\n p[1] = subprocess.Popen(shlex.split(shell_CMDs[1]), stdout=subprocess.PIPE)\n for i in range(2,len):\n p[i] = subprocess.Popen(shlex.split(shell_CMDs[i]), stdin=p[i-1].stdout, stdout=subprocess.PIPE)\n p[len] = subprocess.Popen(shlex.split(shell_CMDs[len]), stdin=p[len-1].stdout, stdout=fd)\n for i in range(2,len+1):\n p[i].wait()\n fd.close()\n\ndef file_Sort_Format(s_File,wc):\n max_Width = 0\n s_File_Sorted = '{0}_Sorted'.format(s_File)\n if(os.path.isfile(s_File_Sorted)):\n subprocess.call(shlex.split('rm {0}'.format(s_File_Sorted)))\n #Run the command described by args. Wait for command to complete, then return the returncode attribute\n shell_CMD_1 = 'more {0}'.format(s_File)\n shell_CMD_2 = 'sort'\n shell_CMDs = {1:shell_CMD_1,2:shell_CMD_2}\n pipe_Shell_CMD(shell_CMDs,s_File_Sorted)\n fd_s = open(s_File_Sorted,'r+')\n d_File_Sorted_Formated = '{0}_Formated'.format(s_File_Sorted)\n if(os.path.isfile(d_File_Sorted_Formated)):\n subprocess.call(shlex.split('rm {0}'.format(d_File_Sorted_Formated)))\n fd_d = open(d_File_Sorted_Formated,'a+')\n for i in range(1,wc+1):\n line = fd_s.readline()\n pureCMD = line.strip('\\n').strip(' ')\n kw_Len = pureCMD.split(' ').__len__()\n if(max_Width < kw_Len):\n max_Width = kw_Len\n fd_d.write(pureCMD)\n fd_d.write('\\n')\n fd_s.close()\n fd_d.close()\n return((d_File_Sorted_Formated,max_Width))\n\n# SF_W = file_Sort_Format(s_File,wc)\n# sorted_File_Sorted_Formated = SF_W[0]\n# max_Width = SF_W[1]\n\ndef awk_Cols_CMD(start_Col,end_Col):\n awk_Cols = ''\n for i in range(start_Col,end_Col+1):\n awk_Cols = ''.join((awk_Cols,'$',str(i),','))\n awk_Cols = awk_Cols.strip(',')\n return(''.join(('awk {\\'print ',awk_Cols,'\\'}')))\n\ndef file_Select_Cols(sorted_File_Sorted_Formated,start_Col,end_Col):\n dir = os.path.dirname(sorted_File_Sorted_Formated)\n basename = os.path.basename(sorted_File_Sorted_Formated)\n workdir = ''.join((dir,'/',basename,'_workdir'))\n selected_Cols_File = ''.join((workdir,'/',str(start_Col),'-',str(end_Col)))\n if(os.path.isdir(workdir)):\n pass\n else:\n os.mkdir(workdir)\n if(os.path.isfile(selected_Cols_File)):\n subprocess.call('rm {0}'.format(selected_Cols_File),shell=True)\n #If shell is True, it is recommended to pass args as a string rather than as a sequence.\n shell_CMD_1 = ''.join(('more ',sorted_File_Sorted_Formated))\n shell_CMD_2 = awk_Cols_CMD(start_Col,end_Col)\n shell_CMDs = {1:shell_CMD_1,2:shell_CMD_2}\n pipe_Shell_CMD(shell_CMDs,selected_Cols_File)\n return(selected_Cols_File)\n\n# start_Col = 1\n# end_Col = 5\n# selected_Cols_File = file_Select_Cols(sorted_File_Sorted_Formated,start_Col,end_Col)\n\ndef tree():\n return defaultdict(tree)\n\ndef add_Nodes(CMD_tree,selected_Cols_File,wc,regex_Width):\n reg_Result = regex_Width.search(selected_Cols_File)\n if(reg_Result == None):\n width = wc_W(selected_Cols_File)\n else:\n width = int(reg_Result.group(1))\n 
fd_SCF = open(selected_Cols_File,'r+')\n for i in range(1,wc+1):\n line = fd_SCF.readline()\n pureCMD = line.strip('\\n').strip(' ')\n keywords = pureCMD.split(' ')\n len = keywords.__len__()\n code ='CMD_tree'\n for each in keywords:\n code = '{0}[\\'{1}\\']'.format(code,each)\n if(len == width):\n pass\n elif(each ==''):\n pass\n else:\n code = '{0}[\\'\\']'.format(code)\n eval(code)\n\n\ndef file_Filter_By_Width_Range(s_File,wc,wid_Lower,wid_Upper):\n fd_s = open(s_File,'r+')\n d_file = '{0}_By_Width_{1}-{2}'.format(s_File,wid_Lower,wid_Upper)\n if(os.path.exists(d_file)):\n subprocess.call('rm {0}'.format(d_file),shell=True)\n fd_d = open(d_file,'a+')\n for i in range(0,wc):\n line = fd_s.readline()\n wid = line.strip('\\n').strip(' ').split(' ').__len__()\n if( wid > wid_Upper):\n pass\n elif(wid < wid_Lower):\n pass\n else:\n fd_d.write(line)\n fd_s.close()\n fd_d.close()\n return(d_file)\n\n\ndef sort_U(s_File,d_File):\n fd_d = open(d_File,'w+')\n shell_CMD_1 = ''.join(('more ',s_File))\n shell_CMD_2 = ''.join(('sort -u'))\n p1 = subprocess.Popen(shlex.split(shell_CMD_1), stdout=subprocess.PIPE)\n p2 = subprocess.Popen(shlex.split(shell_CMD_2), stdin=p1.stdout, stdout=fd_d)\n p1.wait()\n p2.wait()\n fd_d.close()\n return(d_File)\n\n\ndef get_Entry_Filter_Line_Dict(entry_File,sorted_File_Sorted_Formated,max_Width,regex_Underscore):\n workdir = os.path.dirname(entry_File)\n wc_EF = wc_L(entry_File)\n fd_EF = open(entry_File,'r+')\n entries = {}\n for i in range(1,wc_EF+1):\n line = fd_EF.readline().strip('\\n')\n keys_Len = line.split(' ').__len__()\n shell_CMDs = {}\n shell_CMDs[1] = ''.join(('more ',sorted_File_Sorted_Formated))\n shell_CMDs[2] = ''.join(('egrep ','\"',line,'\"'))\n awk_Cols = ''\n for j in range(keys_Len+1,max_Width+1):\n awk_Cols = ''.join((awk_Cols,'$',str(j),','))\n awk_Cols = awk_Cols.strip(',')\n shell_CMDs[3] = ''.join(('awk {\\'print ',awk_Cols,'\\'}'))\n name = regex_Underscore.sub('_',line,0)\n d_File_Sorted_Formated = ''.join((workdir,'/',name))\n pipe_Shell_CMD(shell_CMDs,d_File_Sorted_Formated)\n entries[i] = d_File_Sorted_Formated\n fd_EF.close()\n return(entries)\n\n\ndef format_Subfiles(entries):\n entries_Sub = {}\n for i in range(1,entries.__len__()+1):\n fd_orig = open(entries[i],'r+')\n wc_orig = wc_L(entries[i])\n subfile = '{0}_Subfile'.format(entries[i])\n fd_new = open(subfile,'w+')\n for j in range(1,wc_orig+1):\n line = fd_orig.readline().strip('\\n').strip(' ')\n fd_new.write(line)\n fd_new.write('\\n')\n fd_orig.close()\n fd_new.close()\n os.system('rm {0}'.format(entries[i]))\n entries_Sub[i] = subfile\n return(entries_Sub)\n\n\ndef j_Dict_File(subfile,wc,regex_Width):\n CMD_tree = tree()\n wc = wc_L(subfile)\n add_Nodes(CMD_tree,subfile,wc,regex_Width)\n j_Dict_Str = json.dumps(CMD_tree,sort_keys=True,indent=4)\n get_Entry_File = '{0}_Jdict'.format(subfile)\n fd_get_Entry_Jdict = open(get_Entry_File,'w+')\n fd_get_Entry_Jdict.write(j_Dict_Str)\n fd_get_Entry_Jdict.close()\n return(get_Entry_File)\n\n \n\ns_File = '/opt/py/JSON/show_lpts.txt'\nwc = wc_L(s_File)\nSF_W = file_Sort_Format(s_File,wc)\nsorted_File_Sorted_Formated = SF_W[0]\nmax_Width = SF_W[1]\nstart_Col = 1\nend_Col = 5\nselected_Cols_File = file_Select_Cols(sorted_File_Sorted_Formated,start_Col,end_Col)\nCMD_tree = tree()\nwc = wc_L(selected_Cols_File)\nregex_Width = re.compile('.*[0-9]\\-([0-9])')\nj_Dict_Entry_File = j_Dict_File(selected_Cols_File,wc,regex_Width)\nwid_Lower = 5\nwid_Upper = 5\nentry_File_Raw = 
file_Filter_By_Width_Range(selected_Cols_File,wc,wid_Lower,wid_Upper)\nentry_File = '{0}_Uniq'.format(entry_File_Raw)\nsort_U(entry_File_Raw,entry_File)\nregex_Underscore = re.compile('( )|\\<|\\>')\nentries = get_Entry_Filter_Line_Dict(entry_File,sorted_File_Sorted_Formated,max_Width,regex_Underscore)\nentries_Sub = format_Subfiles(entries)\nentries_Sub_Len = entries_Sub.__len__()\n\n\nfor i in range(1,entries_Sub_Len + 1):\n CMD_tree = tree()\n wc = wc_L(entries_Sub[i])\n add_Nodes(CMD_tree,entries_Sub[i],wc,regex_Width)\n j_Dict_Str = json.dumps(CMD_tree,sort_keys=True,indent=4)\n get_Entry_File = '{0}_Jdict'.format(entries_Sub[i])\n fd_get_Entry_Jdict = open(get_Entry_File,'w+')\n fd_get_Entry_Jdict.write(j_Dict_Str)\n fd_get_Entry_Jdict.close()\n\n\n \n\n\n", "sub_path": "DRAFT/CLI-JSON.py", "file_name": "CLI-JSON.py", "file_ext": "py", "file_size_in_byte": 8705, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "subprocess.Popen", "line_number": 17, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 17, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 18, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 18, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 18, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 45, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 45, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 47, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 47, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 48, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 57, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 66, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 101, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": 
"attribute"}, {"api_name": "subprocess.call", "line_number": 144, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 164, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 164, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 164, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 165, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 209, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 218, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 237, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 244, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 254, "usage_type": "call"}]} +{"seq_id": "245747653", "text": "import argparse\nfrom datetime import datetime, timezone, timedelta\nimport json\nimport os\n\nenv = os.environ.get\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dry-run\", action=\"store_true\")\n parser.add_argument(\"--print\", action=\"store_true\")\n parser.add_argument(\"--config-path\", default=env(\"FEED_CONFIG\"))\n parser.add_argument(\"--mail-user\", default=env(\"FEED_MAIL_USER\"))\n parser.add_argument(\"--mail-password\", default=env(\"FEED_MAIL_PASS\"))\n parser.add_argument(\"--mail-host\", default=env(\"FEED_MAIL_HOST\"))\n parser.add_argument(\"--mail-to\", default=env(\"FEED_MAIL_TO\"))\n\n return parser.parse_args()\n\n\ndef save_feeds(path, config, updated_items):\n now = datetime.now(timezone.utc).isoformat()\n for name, time in config.items():\n if name in updated_items:\n config[name] = now\n else:\n config[name] = time.isoformat()\n\n contents = json.dumps(config, indent=2, sort_keys=True)\n\n with open(path, 'w+') as f:\n f.write(json.dumps(config, indent=2, sort_keys=True))\n\n\ndef load_feeds(configPath):\n with open(configPath) as f:\n config = {}\n for k,v in json.load(f).items():\n try:\n # Default to old enough that it wont filter out any messages.\n config[k] = (datetime.now(timezone.utc) -\n timedelta(days=500000))\n if v:\n config[k] = datetime.fromisoformat(v)\n except ValueError:\n print(f\"{k} has invalid date of {v}\")\n return config\n", "sub_path": "feed2mail/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1593, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 22, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 22, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "json.load", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 41, "usage_type": 
"attribute"}, {"api_name": "datetime.timezone", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime.fromisoformat", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "520720676", "text": "import xml.etree.ElementTree as ET\nimport xml.dom.minidom as minidom\n\n\ndef test_et():\n # st = ET.Element('ST_BRIDGE', version=\"1.4.00\")\n st = ET.Element('ST_BRIDGE')\n st.set('version', '1.4.00')\n\n mdl = ET.SubElement(st, 'StbModel')\n axes = ET.SubElement(mdl, 'StbAxes')\n x1 = ET.SubElement(axes, 'StbX_Axis')\n x1.set('id', '1')\n x1.set('name', '1')\n x1.set('distance', '0')\n tree = ET.ElementTree(element=st)\n\n # ET.dump(tree)\n\n string = ET.tostring(tree.getroot(), 'utf-8')\n pretty_string = minidom.parseString(string).toprettyxml(indent=' ')\n print(pretty_string)\n\n\ndef xtest_2():\n root = ET.Element('root')\n\n sub = ET.SubElement(root, 'sub')\n\n subsub = ET.SubElement(sub, 'subsub')\n subsub.set('key', 'value')\n subsub.text = 'text'\n\n subsub2 = ET.SubElement(sub, 'subsub2')\n subsub2.set('key2', 'あvalue2日本語')\n subsub2.text = 'あtext2'\n\n tree = ET.ElementTree(element=root)\n ET.dump(tree)\n", "sub_path": "test_ElementTree.py", "file_name": "test_ElementTree.py", "file_ext": "py", "file_size_in_byte": 974, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "xml.etree.ElementTree.Element", "line_number": 7, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 7, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 10, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 10, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 11, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 11, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 12, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 12, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.ElementTree", "line_number": 16, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 16, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.tostring", "line_number": 20, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 20, "usage_type": "name"}, {"api_name": "xml.dom.minidom.parseString", "line_number": 21, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 21, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 26, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 26, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 28, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 28, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 30, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 30, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 34, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 34, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.ElementTree", "line_number": 38, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 38, "usage_type": 
"name"}, {"api_name": "xml.etree.ElementTree.dump", "line_number": 39, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "78506294", "text": "'''\r\nCreated on 2016年2月4日\r\n\r\n@author: Adams Zhou\r\n'''\r\nfrom winstock.dao.stockPriceDao import StockPriceDao\r\nimport logging\r\n\r\nclass StockPriceService:\r\n def __init__(self, conn):\r\n self.logger = logging.getLogger(\"winstock.service.StockPriceService\")\r\n self.stockPriceDao = StockPriceDao(conn)\r\n self.conn = conn\r\n \r\n def importStockPrice(self, stockPriceList):\r\n self.logger.info(\"StockPriceService.importStockPrice start\")\r\n try:\r\n for stockPrice in stockPriceList:\r\n \r\n count = self.stockPriceDao.getCountByKey(stockPrice)\r\n \r\n if count == 0:\r\n self.logger.info(\"insert\")\r\n self.stockPriceDao.insert(stockPrice)\r\n else:\r\n self.logger.info(\"update\")\r\n self.stockPriceDao.update(stockPrice)\r\n except Exception as ex:\r\n self.logger.error(ex)\r\n self.conn.rollback()\r\n \r\n self.conn.commit()\r\n self.logger.info(\"StockPriceService.importStockPrice end\")\r\n \r\n def updateStockPrice(self, stockPrice):\r\n self.stockPriceDao.update(stockPrice)\r\n self.conn.commit()\r\n \r\n def deleteStockPriceByKey(self, stockCode):\r\n self.stockPriceDao.deleteByKey(stockCode)\r\n self.conn.commit()\r\n \r\n def getStockPriceByKey(self, stockPrice):\r\n return self.stockPriceDao.getByKey(stockPrice)\r\n \r\n def getAllStockPrice(self):\r\n return self.stockPriceDao.getAll()\r\n ", "sub_path": "winstock/service/stockPriceService.py", "file_name": "stockPriceService.py", "file_ext": "py", "file_size_in_byte": 1586, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "winstock.dao.stockPriceDao.StockPriceDao", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "614157855", "text": "from pyfirmata import Arduino,util\r\nimport time\r\n\r\n#핀 모드설정\r\nboard = Arduino('COM8')\r\nanalog_input= board.get_pin('a:0:i') # 0번핀 입력\r\nled = board.get_pin('d:13:o') # 13번핀 출력\r\n\r\nit = util.Iterator(board) # 회로의 입력상태를 읽어올 변수 선언\r\nit.start()\r\n\r\nwhile True:\r\n analog_value = analog_input.read()\r\n if analog_value is not None:\r\n delay = analog_value + 0.01\r\n print(analog_value)\r\n led.write(1)\r\n time.sleep(delay)\r\n led.write(0)\r\n time.sleep(delay)\r\n else:\r\n time.sleep(0.1)\r\n\r\n\r\n", "sub_path": "6.LED with Potentiometer.py", "file_name": "6.LED with Potentiometer.py", "file_ext": "py", "file_size_in_byte": 585, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pyfirmata.Arduino", "line_number": 5, "usage_type": "call"}, {"api_name": "pyfirmata.util.Iterator", "line_number": 9, "usage_type": "call"}, {"api_name": "pyfirmata.util", "line_number": 9, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 18, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "335128209", "text": "#! 
/usr/bin/python\n\n# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\n__author__ = \"Arius_EX\"\n__date__ = \"$11 12, 16 7:26:30 PM$\"\n\nfrom datetime import datetime\n\nfrom connectdb import ConnectDB\nfrom purchase_request import PurchaseRequest\n\nc = ConnectDB()\nconnection = c.connection()\ncur = connection.cursor()\n\npreq = PurchaseRequest()\n\nclass RequestForQuotation:\n \n def addRequest(self, quotationNum, refnum, projName, projLoc, canvasser):\n\n \n counter = RequestForQuotation.getMaxCounter(self) + 1\n currentdate = datetime.now().strftime('%Y-%m-%d')\n \t\n sql = \"insert into req_for_quotation values('\"+quotationNum+\"', '\"+refnum+\"', '\"+projName+\"', '\"+projLoc+\"', '\"+currentdate+\"', '\"+canvasser+\"', \"+str(counter)+\");\"\n cur.execute(sql)\n connection.commit()\n print(\"Done\")\n\n def addComToReq(self, quotNum, compid):\n \t\n \tsql = \"insert into req_for_quotation_suppliers values('\"+quotNum+\"', '\"+compid+\"');\"\n \t\n \tcur.execute(sql)\n \tconnection.commit()\n \tprint(\"Done\")\n\n def addItemToReq(self, quotNum, itemNum, description, unit, unitprice, quantity):\n \n sql = \"insert into req_for_quotation_items values('\"+quotNum+\"', \"+str(itemNum)+\", '\"+description+\"', \"+str(quantity)+\", '\"+unit+\"', \"+str(unitprice)+\");\"\n print(sql)\n \n cur.execute(sql)\n connection.commit()\n print(\"Done\") \n\n def updateComTerms(self, quotNum, compid, warrantyper, delperiod, pricevalidity):\n \t\n \tsql = \"update req_for_quotation_suppliers set warrantyper = \"+warrantyper+\", delperiod = \"+delperiod+\", pricevalidity = \"+pricevalidity+\" where quotationnum = '\"+quotNum+\"' and compid = '\"+compid+\"'\" \n\n \tcur.execute(sql)\n \tconnection.commit()\n \tprint(\"Done\")\n\n \n def getComTerms(self, quotnum, compid):\n \n sql = \"select * from req_for_quotation_suppliers where quotationnum = '\"+quotnum+\"' and compid = '\"+compid+\"'\"\n\n cur.execute(sql)\n connection.commit()\n result = cur.fetchall()\n\n return result\n \n\n def getReqItems(self, quotNum):\n \t\n sql = \"select * from req_for_quotation_items where quotationnum = '\"+ quotNum + \"';\"\n\n cur.execute(sql)\n connection.commit()\n result = cur.fetchall()\n \t\n return result\n\n def getReqComp(self, quotNum):\n \n sql = \"select * from req_for_quotation_suppliers where quotationnum = '\"+ quotNum + \"';\" \n\n cur.execute(sql)\n connection.commit()\n result = cur.fetchall()\n \n return result\n \n def getRequestDetails(self, quotNum):\n \t\n \tsql = \"select * from req_for_quotation where quotationnum = '\"+quotNum+\"'\"\n\n \tcur.execute(sql)\n \tconnection.commit()\n \tresult = cur.fetchall()\n\n \treturn result\n \n def getMaxCounter(self):\n \n sql = \"select max(counter) from req_for_quotation where date_part('year', datecreated) = date_part('year', CURRENT_DATE)\" \n\n cur.execute(sql)\n connection.commit()\n result = cur.fetchall() \n \n if result[0][0] is None:\n return 0\n else:\n return result[0][0]\n\n def getAllReqQuo(self):\n \n sql = \"select * from req_for_quotation order by (quotationnum)DESC\"\n\n cur.execute(sql)\n connection.commit()\n result = cur.fetchall()\n\n return result\n\n\n def generateReqNum(self):\n \n counter = RequestForQuotation.getMaxCounter(self) + 1\n\n maxCounter = str(counter).zfill(3)\n year = datetime.now().year\n suffix = float(str(year)[-3:]) if '.' 
in str(year)[-2:] else int(str(year)[-2:])\n return maxCounter+\"-\"+str(suffix)\n\n def getItemNumFromDescription(self, quotNum, description):\n sql = \"select itemnum from req_for_quotation_items where description = '\"+description+\"' and quotationnum = '\"+quotNum+\"';\"\n\n cur.execute(sql)\n connection.commit()\n result = cur.fetchall()\n\n return result[0][0] \n\n def getReqNumFromRefNum(self, refnum):\n sql = \"select quotationnum from req_for_quotation where refnum = '\"+refnum+\"'\"\n\n cur.execute(sql)\n connection.commit()\n result = cur.fetchall()\n \n if result == []:\n return None\n else:\n return result[0][0] \n \n def getReqNumFromRefNumAll(self, refnum):\n sql = \"select quotationnum from req_for_quotation where refnum = '\"+refnum+\"'\"\n\n cur.execute(sql)\n connection.commit()\n result = cur.fetchall()\n \n return result \n\nif __name__ == \"__main__\":\n r = RequestForQuotation()\n #r.addRequest('1234-1582', '12544','','','')\n #r.addComToReq('1234-1582', 'smi')\n #print(r.getReqItems('1234-1582'))\n #print(r.getMaxCounter())\n print(r.generateReqNum())\n\n", "sub_path": "Project_SMO_Inventory/static/src/core_scripts/request_for_quotation.py", "file_name": "request_for_quotation.py", "file_ext": "py", "file_size_in_byte": 5004, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "connectdb.ConnectDB", "line_number": 15, "usage_type": "call"}, {"api_name": "purchase_request.PurchaseRequest", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 130, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 130, "usage_type": "name"}]} +{"seq_id": "232149747", "text": "import os\n\nfrom celery import Celery\nfrom celery.schedules import crontab\nfrom django.core.management import call_command\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"config.settings.local\")\n\napp = Celery(\"config\")\napp.config_from_object(\"django.conf:settings\", namespace=\"CELERY\")\napp.conf.timezone = \"Asia/Seoul\"\napp.autodiscover_tasks()\n\n\n@app.on_after_configure.connect\ndef setup_periodic_tasks(sender, **kwargs):\n sender.add_periodic_task(crontab(hour=0), dbbackup.s())\n\n\n@app.task(bind=True)\ndef debug_task(self):\n print(f\"Request: {self.request!r}\")\n\n\n@app.task\ndef dbbackup():\n call_command(\"dbbackup\", \"--clean\")\n", "sub_path": "app/config/celery.py", "file_name": "celery.py", "file_ext": "py", "file_size_in_byte": 636, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.environ.setdefault", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "celery.Celery", "line_number": 9, "usage_type": "call"}, {"api_name": "celery.schedules.crontab", "line_number": 17, "usage_type": "call"}, {"api_name": "django.core.management.call_command", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "293421741", "text": "import sqlite3\nimport pdb\n\nclass DataHand(object):\n \"\"\"docstring for DataHand\"\"\"\n def __init__(self, file = \"script\\\\xlsdata\\\\data.db\" ):\n super(DataHand, self).__init__()\n self.dbName = file\n\n def initTable(self,table):\n # localTime = str(int(localTime))\n # sqlTableName='TM'+localTime+'US'+username\n conn 
= sqlite3.connect(self.dbName)\n        cursor = conn.cursor()\n        try:\n            # [('test count', 'channel count', 'wavelength', 'IL', 'ORL')]\n            strEx='create table if not exists '+table+\\\n            ' ( No varchar(10), channel varchar(10), wave varchar(10),IL varchar(10), ORL varchar(10))'\n            cursor.execute(strEx)\n        except Exception as e :\n            raise e\n        cursor.close()\n        conn.commit()\n        conn.close()\n        # return sqlTableName\n\n    def save2Sql(self,sqlTableName,data):\n        conn = sqlite3.connect(self.dbName)\n        cursor = conn.cursor()\n        try:\n            strEx = 'insert into {} (No, channel, wave, IL, ORL) values (\'{}\',\'{}\',\'{}\',\'{}\',\'{}\')'\n\n            # change all the data into str\n            strEx = strEx.format(sqlTableName,str(data[0]),data[1],\n                data[2],data[3].decode('utf-8'),data[4].decode('utf-8'))\n            # print(strEx)\n            cursor.execute(strEx)\n        except sqlite3.OperationalError as e :\n            raise e\n        except Exception as e:\n            raise e\n        cursor.close()\n        conn.commit()\n        conn.close()\n\n\n    def getTableData(self,tableName):\n        conn = sqlite3.connect(self.dbName)\n        cursor = conn.cursor()\n        try:\n            strEx='select * from '+tableName\n            cursor.execute(strEx)\n        except sqlite3.OperationalError as e:\n            print(e)\n\n        data = cursor.fetchall()\n        cursor.close()\n        conn.commit()\n        conn.close()\n        return data\n\n\n    def getTableDataList(self,tableName,listName):\n        conn = sqlite3.connect(self.dbName)\n        cursor = conn.cursor()\n        datalist = []\n        for x in listName:\n            try:\n                strEx = 'select {} from '+tableName\n                strEx = strEx.format(x)\n                cursor.execute(strEx)\n            except sqlite3.OperationalError as e:\n                print(e)\n\n            datalist.append(cursor.fetchall())\n        cursor.close()\n        conn.commit()\n        conn.close()\n        return datalist\n", "sub_path": "script/datahand.py", "file_name": "datahand.py", "file_ext": "py", "file_size_in_byte": 2434, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sqlite3.connect", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlite3.OperationalError", "line_number": 38, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlite3.OperationalError", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 64, "usage_type": "call"}, {"api_name": "sqlite3.OperationalError", "line_number": 72, "usage_type": "attribute"}]} +{"seq_id": "371449374", "text": "import sys\nimport time\nimport datetime\n\ndef starting_print_statement():\n    \"\"\"Initialising print statement at beginning of Run Time.\"\"\"\n\n    start_time = [time.time(), datetime.datetime.now()]\n    print('Start Time:', start_time[1].strftime(\"%H:%M:%S\"))\n    time_elapsed = time.time()\n\n    return (start_time, time_elapsed)\n\ndef interim_print_statement(file, start_time, time_elapsed):\n    \"\"\"Recurring print statement throughout Job.\"\"\"\n\n    if datetime.datetime.now().strftime(\"%H:%M:%S\") != start_time[1].strftime(\"%H:%M:%S\"):\n        current_job_time = time.time() - time_elapsed\n        print('Current Time:', datetime.datetime.now().strftime(\"%H:%M:%S\"))\n        print('Previous Year Manual Duration:', round(current_job_time / 60, 2), 'minutes')\n\n    time_elapsed = time.time()\n    print('Active File:', file)\n    return time_elapsed\n\ndef time_elapsed_placeholder(start_time):\n    \"\"\"Create placeholder for timestamps while searching for starting file.\"\"\"\n\n    time_elapsed = time.time() - start_time[0]\n    return time_elapsed\n\ndef concluding_print_statement(start_time, time_elapsed):\n    
\"\"\"Concluding print statement at end of Run Time.\"\"\"\n\n current_job_time = time.time() - time_elapsed\n elapsed_time = round(time.time() - start_time[0], 2)\n print('Previous Year Manual Duration:', round(current_job_time / 60, 2), 'minutes')\n print('Total Duration:', str(round(elapsed_time/60, 2)) + ' minutes')\n", "sub_path": "full_page_crop/codebase/RunTimeData.py", "file_name": "RunTimeData.py", "file_ext": "py", "file_size_in_byte": 1422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "time.time", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 22, "usage_type": "call"}, {"api_name": "time.time", "line_number": 29, "usage_type": "call"}, {"api_name": "time.time", "line_number": 35, "usage_type": "call"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "223095318", "text": "import os\nfrom bert.run_pretraining import model_fn_builder, input_fn_builder\nfrom bert.modeling import BertConfig\n\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\nimport glob\nimport re\nimport pandas as pd\nimport numpy as np\nfrom multiprocessing import Process, current_process, cpu_count, Queue\n\ndef evaluatePretraining():\n FLAGS = flags.FLAGS\n\n input_file = \"./data/bert_pretraining_data/seq_128/validate/tf_examples.tfrecord*\"\n FLAGS.output_dir = \"./data\"\n # FLAGS.do_train = True\n # FLAGS.do_eval = True\n FLAGS.bert_config_file = './data/BERT/uncased_L-12_H-768_A-12/bert_config.json'\n # FLAGS.init_checkpoint = './data/BERT/uncased_L-12_H-768_A-12/bert_model.ckpt'\n FLAGS.train_batch_size = 32\n FLAGS.max_seq_length = 128\n FLAGS.max_predictions_per_seq = 20\n FLAGS.num_train_steps = 185000 # We take that number to iterate over the created checkpoints\n FLAGS.num_warmup_steps = 92500\n FLAGS.learning_rate = 2e-5\n FLAGS.eval_batch_size = 32\n FLAGS.save_checkpoints_steps = 18500\n FLAGS.iterations_per_loop = 1000\n FLAGS.max_eval_steps = 30560\n FLAGS.use_tpu = False\n FLAGS.tpu_name = None\n FLAGS.tpu_zone = None\n FLAGS.gcp_project = None\n FLAGS.master = None\n FLAGS.num_tpu_cores = None\n\n # index_file = './data/multiline_report_index_train.csv'\n\n FLAGS.mark_as_parsed()\n \n logging.set_verbosity(logging.INFO)\n\n # init_checkpoint = './data/BERT/uncased_L-12_H-768_A-12/bert_model.ckpt'\n\n os.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\n # if not FLAGS.do_train and not FLAGS.do_eval:\n # raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n bert_config = BertConfig.from_json_file(FLAGS.bert_config_file)\n\n input_files = []\n for input_pattern in input_file.split(\",\"):\n input_files.extend(tf.io.gfile.glob(input_pattern))\n\n logging.info(\"*** Input Files ***\")\n for input_file in input_files:\n logging.info(\" %s\" % input_file)\n\n tpu_cluster_resolver = None\n\n is_per_host = 
tf.compat.v1.estimator.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.compat.v1.estimator.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n # model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.compat.v1.estimator.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n output_df = pd.DataFrame()\n\n checkpoint_path = './data/BERT/uncased_L-12_H-768_A-12/bert_model.ckpt'\n result = _evaluateModel(checkpoint_path, bert_config, run_config, input_files)\n result[\"epoch\"] = 0\n result[\"checkpoint\"] = checkpoint_path\n output_df = output_df.append(result, ignore_index=True)\n logging.info(\"***Eval results***\")\n for key in sorted(result.keys()):\n logging.info(\" %s = %s\", key, str(result[key]))\n\n pretrained_checkpoints = './data/bert_pretraining_checkpoints/model.ckpt-*'\n pretrained_checkpoints_list = glob.glob(pretrained_checkpoints)\n\n for index in range(0, FLAGS.num_train_steps + 1):\n\n if [x for x in pretrained_checkpoints_list if re.search(f'.*model\\.ckpt-{index}\\..*', x)]:\n is_per_host = tf.compat.v1.estimator.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.compat.v1.estimator.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.compat.v1.estimator.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n checkpoint = f'./data/bert_pretraining_checkpoints/model.ckpt-{index}'\n result = _evaluateModel(checkpoint, bert_config, run_config, input_files)\n result[\"epoch\"] = int((index)/FLAGS.num_train_steps)\n result[\"checkpoint\"] = checkpoint\n output_df = output_df.append(result, ignore_index=True)\n logging.info(\"***Eval results***\")\n for key in sorted(result.keys()):\n logging.info(\" %s = %s\", key, str(result[key]))\n\n output_df.to_csv('./data/pretraining_evaluation_validation_data.csv', index=False)\n\n\ndef _evaluateModel(checkpoint, bert_config, run_config, input_files):\n\n FLAGS = flags.FLAGS\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=FLAGS.num_train_steps,\n num_warmup_steps=FLAGS.num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.compat.v1.estimator.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size)\n\n logging.info(\"***** Running evaluation *****\")\n logging.info(f\"Checkpoint: {checkpoint}\")\n logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n eval_input_fn = input_fn_builder(\n input_files=input_files,\n max_seq_length=FLAGS.max_seq_length,\n max_predictions_per_seq=FLAGS.max_predictions_per_seq,\n is_training=False)\n\n result = estimator.evaluate(\n input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)\n\n # output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n # with tf.io.gfile.GFile(output_eval_file, \"w\") as writer:\n # logging.info(\"***** Eval results *****\")\n # for key in sorted(result.keys()):\n # logging.info(\" %s = %s\", key, str(result[key]))\n # writer.write(\"%s = %s\\n\" % (key, 
str(result[key])))\n return result\n\n\n\nif __name__ == \"__main__\":\n evaluatePretraining()", "sub_path": "evaluate_pretraining.py", "file_name": "evaluate_pretraining.py", "file_ext": "py", "file_size_in_byte": 6144, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "absl.flags.FLAGS", "line_number": 15, "usage_type": "attribute"}, {"api_name": "absl.flags", "line_number": 15, "usage_type": "name"}, {"api_name": "absl.logging.set_verbosity", "line_number": 44, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 44, "usage_type": "name"}, {"api_name": "absl.logging.INFO", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 48, "usage_type": "attribute"}, {"api_name": "bert.modeling.BertConfig.from_json_file", "line_number": 53, "usage_type": "call"}, {"api_name": "bert.modeling.BertConfig", "line_number": 53, "usage_type": "name"}, {"api_name": "tensorflow.io.gfile.glob", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 57, "usage_type": "attribute"}, {"api_name": "absl.logging.info", "line_number": 59, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 59, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 61, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 61, "usage_type": "name"}, {"api_name": "tensorflow.compat", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.estimator.tpu.RunConfig", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.estimator.tpu.TPUConfig", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 76, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 83, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 83, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 85, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 85, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 88, "usage_type": "call"}, {"api_name": "re.search", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.estimator.tpu.RunConfig", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.estimator.tpu.TPUConfig", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 99, "usage_type": "attribute"}, {"api_name": "absl.logging.info", "line_number": 109, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 109, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 111, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 111, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS", "line_number": 118, "usage_type": "attribute"}, {"api_name": "absl.flags", "line_number": 118, "usage_type": "name"}, {"api_name": "bert.run_pretraining.model_fn_builder", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.estimator.tpu.TPUEstimator", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 130, "usage_type": 
"attribute"}, {"api_name": "absl.logging.info", "line_number": 137, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 137, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 138, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 138, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 139, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 139, "usage_type": "name"}, {"api_name": "bert.run_pretraining.input_fn_builder", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "497001465", "text": "# -*- coding: utf-8 -*-\nfrom re import sub\n\nimport scrapy\n\nfrom sherlock import items\nfrom sherlock.lib import Config, Wikidot, regex\n\n\nclass TitlesSpider(scrapy.Spider):\n name = 'titles'\n allowed_domains = ['wikidot.com']\n\n def __init__(self, site=None, *args, **kwargs):\n super(TitlesSpider, self).__init__(*args, **kwargs)\n\n Config.check(site)\n\n self.info = {\n \"branch_id\": Config.get(site, 'id')\n }\n\n paths = Config.get(site, 'index')\n self.start_urls = [Wikidot.path(site, slug) for slug in paths]\n\n def parse(self, response):\n for title in response.css('.content-panel ul a:not(.newpage)'):\n item = items.Title(branch_id=self.info['branch_id'])\n\n item['subtitle'] = sub(regex['scp_title'], '', title.xpath(\n 'string(./ancestor::li)').get()).strip()\n item['slug'] = title.css('::attr(href)').get()[1:]\n\n yield item\n", "sub_path": "sherlock/spiders/titles.py", "file_name": "titles.py", "file_ext": "py", "file_size_in_byte": 951, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "scrapy.Spider", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sherlock.lib.Config.check", "line_number": 17, "usage_type": "call"}, {"api_name": "sherlock.lib.Config", "line_number": 17, "usage_type": "name"}, {"api_name": "sherlock.lib.Config.get", "line_number": 20, "usage_type": "call"}, {"api_name": "sherlock.lib.Config", "line_number": 20, "usage_type": "name"}, {"api_name": "sherlock.lib.Config.get", "line_number": 23, "usage_type": "call"}, {"api_name": "sherlock.lib.Config", "line_number": 23, "usage_type": "name"}, {"api_name": "sherlock.lib.Wikidot.path", "line_number": 24, "usage_type": "call"}, {"api_name": "sherlock.lib.Wikidot", "line_number": 24, "usage_type": "name"}, {"api_name": "sherlock.items.Title", "line_number": 28, "usage_type": "call"}, {"api_name": "sherlock.items", "line_number": 28, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 30, "usage_type": "call"}, {"api_name": "sherlock.lib.regex", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "379522006", "text": "# noinspection PyPackageRequirements\nimport asyncio\nimport traceback\n\nfrom google.protobuf.json_format import MessageToDict\n\nfrom lnd_grpc.lnd_client import LndClient\nfrom lnd_sql.data_schemas.lnd.graph import channel_graph_schema\nfrom lnd_sql.queries.lnd.graph_database import GraphDatabase\nfrom lnd_sql.logger import log\n\n\nclass GraphWorker(object):\n def __init__(self, lnd_client: LndClient):\n self.lnd_client = lnd_client\n self.db = GraphDatabase()\n\n def upsert_all(self):\n log.info('Upserting the whole public Lightning graph')\n channel_graph = self.lnd_client.describe_graph()\n channel_graph_data = MessageToDict(channel_graph)\n validated_channel_graph_data = channel_graph_schema.load(channel_graph_data)\n self.db.upsert_nodes(validated_channel_graph_data['nodes'], mark_deleted=True)\n 
self.db.upsert_edges(validated_channel_graph_data['edges'], mark_deleted=True)\n\n async def run(self):\n minutes = 30\n while True:\n try:\n self.upsert_all()\n except Exception:\n log.exception('Upsert graph error', lnd_client=self.lnd_client)\n log.info(f'Graph upsert completed or errored, going to sleep for {minutes} minutes')\n await asyncio.sleep(60*minutes)", "sub_path": "lnd_sql/workers/graph_worker.py", "file_name": "graph_worker.py", "file_ext": "py", "file_size_in_byte": 1302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "lnd_grpc.lnd_client.LndClient", "line_number": 14, "usage_type": "name"}, {"api_name": "lnd_sql.queries.lnd.graph_database.GraphDatabase", "line_number": 16, "usage_type": "call"}, {"api_name": "lnd_sql.logger.log.info", "line_number": 19, "usage_type": "call"}, {"api_name": "lnd_sql.logger.log", "line_number": 19, "usage_type": "name"}, {"api_name": "google.protobuf.json_format.MessageToDict", "line_number": 21, "usage_type": "call"}, {"api_name": "lnd_sql.data_schemas.lnd.graph.channel_graph_schema.load", "line_number": 22, "usage_type": "call"}, {"api_name": "lnd_sql.data_schemas.lnd.graph.channel_graph_schema", "line_number": 22, "usage_type": "name"}, {"api_name": "lnd_sql.logger.log.exception", "line_number": 32, "usage_type": "call"}, {"api_name": "lnd_sql.logger.log", "line_number": 32, "usage_type": "name"}, {"api_name": "lnd_sql.logger.log.info", "line_number": 33, "usage_type": "call"}, {"api_name": "lnd_sql.logger.log", "line_number": 33, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "361627459", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('product', '0017_auto_20140907_1435'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='alcohol_tags',\n field=models.CharField(default=b'SP', max_length=2, choices=[(b'GI', b'Gin'), (b'VO', b'Vodka'), (b'WH', b'Whiskey'), (b'SP', b'Special')]),\n preserve_default=True,\n ),\n ]\n", "sub_path": "mysite/product/migrations/0018_product_alcohol_tags.py", "file_name": "0018_product_alcohol_tags.py", "file_ext": "py", "file_size_in_byte": 544, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "494682552", "text": "import os, datetime\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template, util\nfrom google.appengine.ext.webapp.util import login_required\nfrom google.appengine.api import mail\n\n# must import template before importing django stuff\nimport django.core.validators\nfrom google.appengine.ext.db import djangoforms\ntry:\n from django import newforms as forms\nexcept ImportError:\n from django import forms\nimport django.core.exceptions\ntry:\n from 
django.utils.safestring import mark_safe\nexcept ImportError:\n def mark_safe(s):\n return s\n\nimport settings\nfrom datastore import *\nfrom imokutils import *\nfrom imokforms import *\n\nclass IntroHandler(RequestHandlerPlus):\n def get(self):\n if users.get_current_user():\n logout_url = users.create_logout_url(\"/\")\n else:\n mustLogIn = \"True\" # this is so the navigation bar only shows the relevant things.\n login_url = users.create_login_url(\"/home\")\n #loginOutUrl = users.create_login_url(self.request.uri)\n\n self.render('intro.html', self.getContext(locals()))\n\nclass AboutHandler(RequestHandlerPlus):\n def get(self):\n if users.get_current_user():\n logout_url = users.create_logout_url(\"/\")\n else:\n mustLogIn = \"True\" # this is so the navigation bar only shows the relevant things.\n login_url = users.create_login_url(\"/home\")\n\n self.render('about.html', self.getContext(locals()))\n\nclass MessageHandler(RequestHandlerPlus):\n def get(self):\n if users.get_current_user():\n logout_url = users.create_logout_url(\"/\")\n else:\n mustLogIn = \"True\" # this is so the navigation bar only shows the relevant things.\n login_url = users.create_login_url(\"/home\")\n\n unique_id = self.request.get('unique_id')\n idQuery = Post.all().filter('unique_id = ', unique_id)\n idMessage = idQuery.get()\n lat = str(idMessage.lat)\n lon = str(idMessage.lon)\n dateTime = str(idMessage.datetime)\n user = ImokUser.all().filter('account = ', idMessage.user).get()\n key = settings.MAPS_KEY\n\n self.render('message.html', self.getContext(locals()))\n\nclass EditProfileHandler(RequestHandlerPlus):\n @login_required\n def get(self):\n user = users.get_current_user()\n profile = getProfile(True)\n phone = getPhone()\n if phone:\n initial = dict(phoneNumber=phone.number_str())\n else:\n initial = None\n form = UserProfileForm(instance=profile, initial=initial)\n self.render('editProfile.html', self.getContext(locals()))\n\n def post(self):\n user = users.get_current_user()\n profile = getProfile(True)\n form = UserProfileForm(data=self.request.POST, instance=profile)\n if form.is_valid():\n phoneChanged = form.saveWithPhone()\n if phoneChanged:\n self.redirect('/phone/verify')\n else:\n self.redirect('/home')\n else:\n # Reprint the form\n self.render('editProfile.html', self.getContext(locals()))\n\nclass HomeHandler(RequestHandlerPlus):\n @login_required\n def get(self):\n user = users.get_current_user()\n profile = getProfile()\n if not profile:\n self.redirect('/newuser/profile')\n\n phone = getPhone()\n if phone and not phone.verified:\n banner = mark_safe('You must
finish verifying your phone number before you can post messages.')\n\n # profile widget\n phonesQuery = Phone.all().filter('user = ', user)\n phones = phonesQuery.fetch(1)\n\n # emails widget\n emailsQuery = RegisteredEmail.all().filter('userName = ', user)\n emails = emailsQuery.fetch(5)\n numEmailsNotShown = emailsQuery.count() - len(emails)\n\n # recent messages widget\n postsQuery = Post.all().filter('user = ', user).order('-datetime')\n posts = postsQuery.fetch(10)\n numPosts = postsQuery.count()\n numPostsNotShown = numPosts - len(posts)\n \n self.render('home.html', self.getContext(locals()))\n\nclass GetInvolvedHandler(RequestHandlerPlus):\n def get(self):\n if users.get_current_user():\n logout_url = users.create_logout_url(\"/\")\n else:\n mustLogIn = \"True\" # this is so the navigation bar only shows the relevant things.\n login_url = users.create_login_url(\"/home\")\n\n self.render('getInvolved.html', self.getContext(locals()))\n\nclass RegisterEmailHandler(RequestHandlerPlus):\n @login_required\n def get(self):\n registeredEmailQuery = RegisteredEmail.all().filter('userName =', users.get_current_user()).order('emailAddress')\n registeredEmailList = registeredEmailQuery.fetch(100)\n self.render('register_email.html', self.getContext(locals()))\n\n def post(self):\n if users.get_current_user():\n newEmail = RegisteredEmail()\n newEmail.userName = users.get_current_user()\n success = True\n tempEmailString = self.request.get('emailAddress')\n newEmail.emailAddress = tempEmailString\n try:\n django.core.validators.isValidEmail(tempEmailString, None)\n except django.core.validators.ValidationError:\n addError = 'Please enter a valid e-mail address.'\n else:\n newEmail.put()\n registeredEmailQuery = RegisteredEmail.all().filter('userName =', users.get_current_user()).order('emailAddress')\n registeredEmailList = registeredEmailQuery.fetch(100)\n self.render('register_email.html', self.getContext(locals()))\n\nclass RemoveRegisteredEmailHandler(RequestHandlerPlus):\n def post(self):\n if users.get_current_user():\n removeEmail = self.request.get('emailAddress')\n removeEmailQuery = RegisteredEmail.all().filter('userName =', users.get_current_user()).filter('emailAddress =', removeEmail)\n removeEmailList = removeEmailQuery.get()\n if removeEmailList:\n removeEmailList.delete()\n \n self.redirect(self.request.get('returnAddr'))\n\n\nclass DownloadsHandler(RequestHandlerPlus):\n @login_required\n def get(self):\n self.render('download.html', self.getContext(locals()))\n \nclass VerifyPhoneHandler(RequestHandlerPlus):\n @login_required\n def get(self):\n phone = getPhone()\n if not phone:\n self.redirect('/home')\n return\n\n self.render('verifyPhone.html', self.getContext(locals()))\n\n def post(self):\n if not users.get_current_user():\n self.redirect('/')\n return\n\n redir_location = self.request.get('continue', '/home')\n\n phone = getPhone()\n if not phone:\n self.redirect(redir_location)\n return\n\n # Generate a code\n phone.code = Phone.generate_code()\n message = \"Verification Code: %s\" % phone.code\n sms = SmsMessage(phone_number=phone.number, \n message=message)\n db.put([sms, phone])\n\n self.redirect(redir_location)\n\nclass ConfirmPhoneHandler(RequestHandlerPlus):\n @login_required\n def get(self):\n phone = getPhone()\n if not phone:\n self.redirect('/home')\n return\n\n self.render('confirmPhone.html', self.getContext(locals()))\n\n def post(self):\n if not users.get_current_user():\n self.redirect('/')\n return\n\n phone = getPhone()\n if not phone:\n 
self.redirect('/home')\n return\n\n errorlist = []\n code = self.request.get('code', '')\n if not code:\n errorlist.append('Must enter a code')\n elif len(code) != 4:\n errorlist.append('Code is only 4 digits')\n elif code != phone.code:\n errorlist.append('Incorrect code')\n\n if errorlist:\n self.render('confirmPhone.html', self.getContext(locals()))\n return\n\n phone.code = ''\n phone.verified = True\n phone.put()\n\n self.redirect('/home')\n\n\ndef main():\n application = webapp.WSGIApplication([\n ('/', IntroHandler),\n ('/home', HomeHandler),\n ('/about', AboutHandler),\n ('/message', MessageHandler),\n ('/getInvolved', GetInvolvedHandler),\n\n # must be logged in for these...\n ('/email', RegisterEmailHandler),\n #('/email/add', AddRegisteredEmailHandler),\n ('/email/remove', RemoveRegisteredEmailHandler),\n ('/phone/verify', VerifyPhoneHandler),\n ('/phone/confirm', ConfirmPhoneHandler),\n ('/profile/edit', EditProfileHandler),\n ('/download', DownloadsHandler),\n \n ], debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8237, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "google.appengine.api.users.get_current_user", "line_number": 30, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 30, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 31, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 31, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_login_url", "line_number": 34, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 34, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 41, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 41, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 42, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 42, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_login_url", "line_number": 45, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 45, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 51, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 51, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 52, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 52, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_login_url", "line_number": 55, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 55, "usage_type": "name"}, {"api_name": "settings.MAPS_KEY", "line_number": 64, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 71, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 71, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.login_required", "line_number": 69, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 82, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 82, "usage_type": "name"}, {"api_name": 
"google.appengine.api.users.get_current_user", "line_number": 98, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 98, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 105, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.util.login_required", "line_number": 96, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 126, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 126, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 127, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 127, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_login_url", "line_number": 130, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 130, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 137, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 137, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.login_required", "line_number": 135, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 142, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 142, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 144, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 144, "usage_type": "name"}, {"api_name": "django.core.validators.core.validators.isValidEmail", "line_number": 149, "usage_type": "call"}, {"api_name": "django.core.validators.core", "line_number": 149, "usage_type": "attribute"}, {"api_name": "django.core.validators", "line_number": 149, "usage_type": "name"}, {"api_name": "django.core.validators.core", "line_number": 150, "usage_type": "attribute"}, {"api_name": "django.core.validators", "line_number": 150, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 154, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 154, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 160, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 160, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 162, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 162, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.login_required", "line_number": 171, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.login_required", "line_number": 176, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 186, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 186, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.login_required", "line_number": 207, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 217, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 217, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.WSGIApplication", "line_number": 247, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp", "line_number": 247, "usage_type": "name"}, {"api_name": 
"google.appengine.ext.webapp.util.run_wsgi_app", "line_number": 264, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.util", "line_number": 264, "usage_type": "name"}]} +{"seq_id": "390397143", "text": "import os\nimport pandas as pd\nfrom pathlib import Path\nimport shutil\n\n\nmimic = False\n\n\nif mimic:\n fn_assign = '/g/ssli/transitory/lybarger/clinicalIE/analyses/20191119_ActLearn_AB_full/step010_corpora/social_det/attribute_by_doc.csv'\n source = '/g/ssli/transitory/lybarger/clinicalIE/data/social_determinants/'\n dest = '/g/ssli/transitory/lybarger/clinicalIE/data/social_determinants/03_mimic_discharge_split'\n focus = '03_mimic_discharge'\n \nelse: \n fn_assign = '/g/ssli/transitory/lybarger/clinicalIE/analyses/20191119_ActLearn_AB_full/step010_corpora/yvnotes/attribute_by_doc.csv'\n source = '/g/ssli/transitory/lybarger/clinicalIE/repo/data/YVnotesMod/'\n dest = '/g/ssli/transitory/lybarger/clinicalIE/repo/data/YVnotesReorg/'\n\n\n\ndf = pd.read_csv(fn_assign)\n\nif mimic:\n df = df[df['id'].str.contains(focus)]\n\nprint(\"Data frame length: {}\".format(len(df)))\n\nids = df['id'].tolist()\nsubsets = df['subset'].tolist()\n\n\nassert len(ids) == len(subsets)\n\n# Clear destination directory\nif os.path.exists(dest):\n shutil.rmtree(dest)\n\nfiles_orig = []\nfor id_, subset in zip(ids, subsets):\n \n # Pattern matching ID\n fn = os.path.basename(id_)\n pat = '{}.*'.format(fn)\n path = os.path.dirname(id_)\n dir_ = os.path.join(source, path)\n \n # Files matching pattern\n files = list(Path(dir_).rglob(pat))\n \n # Check count of files found\n assert len(files) == 2, len(files)\n \n # Destination directory\n #dir_ = os.path.join(dest, subset)\n dir_ = os.path.join(dest, subset, path)\n \n # Create destination directory\n if not os.path.exists(dir_):\n os.makedirs(dir_) \n \n # Iterate over files, and move\n for f in files:\n\n shutil.copy(f, dir_)\n files_orig.append(os.path.basename(f))\n\n# Files matching pattern\nfiles = list(Path(source).rglob('*.conf'))\nfor f in files:\n shutil.copy(f, dest)\n files_orig.append(os.path.basename(f))\n \n \nfiles_new = [os.path.basename(f) for f in Path(dest).rglob('*.*')]\n\nassert len(files_orig) == len(files_new), '{} vs {}'.format(len(files_orig), len(files_new))\nfiles_orig.sort()\nfiles_new.sort()\nassert files_orig == files_new\n\n\nsubsets = list(set(subsets))\nfor subset in subsets:\n dir_ = os.path.join(dest, subset)\n files = list(Path(dir_).rglob('*.*'))\n print(subset, len(files), len(files)/2)", "sub_path": "code/utils/reorg_files.py", "file_name": "reorg_files.py", "file_ext": "py", "file_size_in_byte": 2364, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 50, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 61, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 70, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "143347965", "text": "import unittest\nfrom smartdispatch import smartdispatch_script\nimport subprocess\nfrom mock import patch\nimport tempfile as tmp\nimport shutil\n\n\nclass TestSmartScript(unittest.TestCase):\n\n def setUp(self):\n self._base_dir = tmp.mkdtemp()\n smartdispatch_script.LOGS_FOLDERNAME = self._base_dir\n\n def tearDown(self):\n shutil.rmtree(self._base_dir)\n\n def test_gpu_check(self):\n\n argv = ['-x', '-g', '2', '-G', '1', '-q', 'gpu_1', 'launch', 'echo', 'testing123']\n\n with self.assertRaises(SystemExit) as context:\n smartdispatch_script.main(argv=argv)\n\n self.assertTrue(context.exception.code, 2)\n\n def test_cpu_check(self):\n\n argv = ['-x', '-c', '2', '-C', '1', '-q', 'gpu_1', 'launch', 'echo', 'testing123']\n\n with self.assertRaises(SystemExit) as context:\n smartdispatch_script.main(argv=argv)\n\n self.assertTrue(context.exception.code, 2)\n\n @patch('subprocess.check_output')\n def test_launch_job_check(self, mock_check_output):\n\n mock_check_output.side_effect = subprocess.CalledProcessError(1, 1, \"A wild error appeared!\")\n argv = ['-q', 'gpu_1', 'launch', 'echo', 'testing123']\n\n try:\n with self.assertRaises(SystemExit) as context:\n smartdispatch_script.main(argv=argv)\n\n self.assertTrue(context.exception.code, 2)\n\n except subprocess.CalledProcessError:\n self.fail(\"smartdispatch_script.main() raised CalledProcessError unexpectedly!\")\n", "sub_path": "smartdispatch/tests/test_smartdispatch_script.py", "file_name": "test_smartdispatch_script.py", "file_ext": "py", "file_size_in_byte": 1518, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tempfile.mkdtemp", "line_number": 12, "usage_type": "call"}, {"api_name": "smartdispatch.smartdispatch_script.LOGS_FOLDERNAME", "line_number": 13, "usage_type": "attribute"}, {"api_name": "smartdispatch.smartdispatch_script", "line_number": 13, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 16, "usage_type": "call"}, {"api_name": "smartdispatch.smartdispatch_script.main", "line_number": 23, "usage_type": "call"}, {"api_name": "smartdispatch.smartdispatch_script", 
"line_number": 23, "usage_type": "name"}, {"api_name": "smartdispatch.smartdispatch_script.main", "line_number": 32, "usage_type": "call"}, {"api_name": "smartdispatch.smartdispatch_script", "line_number": 32, "usage_type": "name"}, {"api_name": "subprocess.CalledProcessError", "line_number": 39, "usage_type": "call"}, {"api_name": "smartdispatch.smartdispatch_script.main", "line_number": 44, "usage_type": "call"}, {"api_name": "smartdispatch.smartdispatch_script", "line_number": 44, "usage_type": "name"}, {"api_name": "subprocess.CalledProcessError", "line_number": 48, "usage_type": "attribute"}, {"api_name": "mock.patch", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "332661721", "text": "from django import template\n\nfrom groups.models import Fraction, Group, Rarity, Line\n\nregister = template.Library()\n\n@register.inclusion_tag('groups/cards_filter.html')\ndef groups_filter():\n qs1 = Fraction.objects.all()\n qs2 = Group.objects.all()\n qs3 = Rarity.objects.all()\n qs4 = Line.objects.all()\n return {'factions': qs1, 'groups': qs2, 'rarities': qs3, 'lines': qs4}\n\n\n\n@register.inclusion_tag('groups/builder_filter.html')\ndef groups_builder_filter():\n qs2 = Group.objects.all()\n qs3 = Rarity.objects.all()\n qs4 = Line.objects.all()\n return {'groups': qs2, 'rarities': qs3, 'lines': qs4}\n", "sub_path": "groups/templatetags/groups_filter.py", "file_name": "groups_filter.py", "file_ext": "py", "file_size_in_byte": 622, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.template.Library", "line_number": 5, "usage_type": "call"}, {"api_name": "django.template", "line_number": 5, "usage_type": "name"}, {"api_name": "groups.models.Fraction.objects.all", "line_number": 9, "usage_type": "call"}, {"api_name": "groups.models.Fraction.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "groups.models.Fraction", "line_number": 9, "usage_type": "name"}, {"api_name": "groups.models.Group.objects.all", "line_number": 10, "usage_type": "call"}, {"api_name": "groups.models.Group.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "groups.models.Group", "line_number": 10, "usage_type": "name"}, {"api_name": "groups.models.Rarity.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "groups.models.Rarity.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "groups.models.Rarity", "line_number": 11, "usage_type": "name"}, {"api_name": "groups.models.Line.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": "groups.models.Line.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "groups.models.Line", "line_number": 12, "usage_type": "name"}, {"api_name": "groups.models.Group.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "groups.models.Group.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "groups.models.Group", "line_number": 19, "usage_type": "name"}, {"api_name": "groups.models.Rarity.objects.all", "line_number": 20, "usage_type": "call"}, {"api_name": "groups.models.Rarity.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "groups.models.Rarity", "line_number": 20, "usage_type": "name"}, {"api_name": "groups.models.Line.objects.all", "line_number": 21, "usage_type": "call"}, {"api_name": "groups.models.Line.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "groups.models.Line", "line_number": 21, "usage_type": "name"}]} +{"seq_id": 
"447386536", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport subprocess\n\nmass_adjust = [1,10,1000] # The mass adjustments for Jupiter\nfig, axs = plt.subplots(1, 3, sharex=True, sharey=True) # Set figure.\nfig.suptitle('Three body problem with different Jupiter masses')\nfor n in range(len(mass_adjust)):\n subprocess.run(['./2.x', str(mass_adjust[n])])\n Nobjects = 3 # Antall planeter vi plotter for.\n # Skriver om til dataframe:\n df = pd.read_csv(\"Textfiles/PlanetsVV_3.txt\", delim_whitespace=True, \\\n index_col=False, names=[\"t\",\"n\",\"m\",\"x\",\"y\",\"z\",\"vx\",\"vy\",\"vz\"]) # Leser av tekstfilen.\n planet_dfs = [] # legger verdiene til denne.\n for guy in df.groupby(\"n\"): # Her grupperer vi etter hvilke objekt vi ser på n.\n planet_dfs.append(guy)\n name = ['Jupiter', 'Earth', 'Sun'] # Navn på objektene.\n # PLotting:\n for i in range(Nobjects):\n object = planet_dfs[i][1]\n axs[n].plot(object['x'], object['y'], label=f'{name[i]}')\n axs[n].set_xlabel('x (AU)')\n axs[n].set_ylabel('y (AU)')\n axs[n].title.set_text(f'Jupiter mass * {str(mass_adjust[n])}')\nplt.legend()\nplt.show()\n", "sub_path": "Project3/Prosjekt3_kode/Plotte2.py", "file_name": "Plotte2.py", "file_ext": "py", "file_size_in_byte": 1148, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "577485326", "text": "from __future__ import print_function\nimport random\nimport time\nimport sys\nimport os\nimport pygame\nimport snake\nfrom pygame.locals import *\n\nP2 = (171, 240, 0)\nP1 = (226, 102, 183)\nP3 = (197, 0, 128)\nP4 = (226, 59, 167)\nBGCOLOR = P1\n\nclass Game:\n 'Game where snake eats snacks'\n def __init__(self, cell=20, height=480, width=640):\n self.score = 0\n self.speed = 0.3\n self.height = height\n self.width = width\n self.cell = cell\n self.run = True\n self.startSpeed = self.speed\n\n pygame.init()\n self.display = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption('Python Eats Snacks')\n\n self.Main()\n\n def CreateNewSnake(self):\n 'Creates new Snake object'\n randx = random.randint(5,(self.width/self.cell)-5)\n randy = random.randint(5,(self.height/self.cell)-5)\n return snake.Snake(randx*self.cell, randy*self.cell)\n def CreateNewFoodItem(self):\n 'Creates new food item'\n self.foodx = (random.randint(5,(self.width/self.cell)-5))*self.cell\n self.foody = (random.randint(5,(self.height/self.cell)-5))*self.cell\n for i in self.snake.positions:\n if self.foodx == i[0] and self.foody == i[1]:\n self.CreateNewFoodItem()\n def DrawField(self):\n pygame.draw.line(self.display, P4, (self.cell-(self.cell/2)-1, 0), (self.cell-(self.cell/2)-1, self.height), self.cell)\n pygame.draw.line(self.display, P4, (0, (self.cell-(self.cell/2)-1)), (self.width, (self.cell-(self.cell/2)-1)), self.cell)\n pygame.draw.line(self.display, P4, 
(self.width-self.cell+(self.cell/2)-1, 0), (self.width-self.cell+(self.cell/2)-1, self.height), self.cell)\n pygame.draw.line(self.display, P4, (0, self.height-self.cell+(self.cell/2)-1), (self.width, self.height-self.cell+(self.cell/2)-1), self.cell)\n def DrawSnake(self):\n for i in self.snake.positions:\n x = i[0]\n y = i[1]\n snakeSegmentRect = pygame.Rect(x, y, self.cell, self.cell)\n pygame.draw.rect(self.display, P3, snakeSegmentRect)\n snakeInnerSegmentRect = pygame.Rect(x + 4, y + 4, self.cell - 8, self.cell - 8)\n pygame.draw.rect(self.display, P3, snakeInnerSegmentRect)\n def drawFoodItem(self):\n foodItem = pygame.Rect(self.foodx, self.foody, self.cell, self.cell)\n pygame.draw.rect(self.display, P2, foodItem)\n def Collision(self):\n if self.snake.x == self.foodx and self.snake.y == self.foody:\n self.score += 1\n self.speed += -0.005\n self.snake.size += 1\n self.CreateNewFoodItem()\n if self.snake.x == self.width-(self.cell*2) or self.snake.y == self.height-(self.cell*2) or self.snake.x == self.cell or self.snake.y == self.cell:\n self.run = False\n for k, i in enumerate(self.snake.positions):\n if i[0] == self.snake.x and i[1] == self.snake.y and k != len(self.snake.positions)-1:\n self.run = False\n if self.run == False:\n os.system('clear')\n print('')\n print(' --- SCORE: %s --- ' % (self.score))\n print(' === GAME OVER === ')\n print('')\n print('')\n os._exit(1)\n def Main(self):\n 'Main function'\n self.snake = self.CreateNewSnake()\n self.CreateNewFoodItem()\n self.Loop()\n\n def Loop(self):\n thisTime = time.time()\n while self.run:\n for event in pygame.event.get():\n if event.type == QUIT:\n os._exit(1)\n elif event.type == KEYDOWN:\n self.snake.ChangeDirection(event.key)\n\n if time.time() - thisTime >= self.speed:\n self.Collision()\n self.snake.Move(self.cell)\n if self.run:\n self.display.fill(BGCOLOR)\n self.DrawField()\n self.drawFoodItem()\n self.DrawSnake()\n thisTime = time.time()\n pygame.display.update()", "sub_path": "gameP.py", "file_name": "gameP.py", "file_ext": "py", "file_size_in_byte": 4128, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pygame.init", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 29, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 35, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "snake.Snake", "line_number": 37, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 40, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 49, "usage_type": "attribute"}, {"api_name": 
"pygame.Rect", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 73, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 79, "usage_type": "call"}, {"api_name": "time.time", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os._exit", "line_number": 91, "usage_type": "call"}, {"api_name": "time.time", "line_number": 95, "usage_type": "call"}, {"api_name": "time.time", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 104, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 104, "usage_type": "attribute"}]} +{"seq_id": "231951139", "text": "from dask.utils import raises\nfrom dask.core import istask, get, get_dependencies, cull, flatten, fuse, subs\n\n\ndef contains(a, b):\n \"\"\"\n\n >>> contains({'x': 1, 'y': 2}, {'x': 1})\n True\n >>> contains({'x': 1, 'y': 2}, {'z': 3})\n False\n \"\"\"\n return all(a.get(k) == v for k, v in b.items())\n\ndef inc(x):\n return x + 1\n\ndef add(x, y):\n return x + y\n\n\ndef test_istask():\n assert istask((inc, 1))\n assert not istask(1)\n assert not istask((1, 2))\n\n\nd = {':x': 1,\n ':y': (inc, ':x'),\n ':z': (add, ':x', ':y')}\n\n\ndef test_get():\n assert get(d, ':x') == 1\n assert get(d, ':y') == 2\n assert get(d, ':z') == 3\n\n\ndef test_memoized_get():\n try:\n import toolz\n except ImportError:\n return\n cache = dict()\n getm = toolz.memoize(get, cache=cache, key=lambda args, kwargs: args[1:])\n\n result = getm(d, ':z', get=getm)\n assert result == 3\n\n assert contains(cache, {(':x',): 1,\n (':y',): 2,\n (':z',): 3})\n\ndef test_data_not_in_dict_is_ok():\n d = {'x': 1, 'y': (add, 'x', 10)}\n assert get(d, 'y') == 11\n\n\ndef test_get_with_list():\n d = {'x': 1, 'y': 2, 'z': (sum, ['x', 'y'])}\n\n assert get(d, ['x', 'y']) == [1, 2]\n assert get(d, 'z') == 3\n\n\ndef test_get_with_nested_list():\n d = {'x': 1, 'y': 2, 'z': (sum, ['x', 'y'])}\n\n assert get(d, [['x'], 'y']) == [[1], 2]\n assert get(d, 'z') == 3\n\n\ndef test_get_works_with_unhashables_in_values():\n f = lambda x, y: x + len(y)\n d = {'x': 1, 'y': (f, 'x', set([1]))}\n\n assert get(d, 'y') == 2\n\n\ndef test_get_laziness():\n def isconcrete(arg):\n return isinstance(arg, list)\n\n d = {'x': 1, 'y': 2, 'z': (isconcrete, ['x', 'y'])}\n\n assert get(d, ['x', 'y']) == [1, 2]\n assert get(d, 'z') == False\n\n\ndef test_get_dependencies_nested():\n dsk = {'x': 1, 'y': 2,\n 'z': (add, (inc, [['x']]), 'y')}\n\n assert get_dependencies(dsk, 'z') == set(['x', 'y'])\n\n\ndef test_get_dependencies_empty():\n dsk = {'x': (inc,)}\n assert get_dependencies(dsk, 'x') == set()\n\n\ndef test_nested_tasks():\n d = {'x': 1,\n 'y': (inc, 'x'),\n 'z': (add, (inc, 'x'), 'y')}\n\n assert get(d, 'z') == 4\n\n\ndef test_cull():\n # 'out' depends on 'x' and 'y', but 
not 'z'\n d = {'x': 1, 'y': (inc, 'x'), 'z': (inc, 'x'), 'out': (add, 'y', 10)}\n culled = cull(d, 'out')\n assert culled == {'x': 1, 'y': (inc, 'x'), 'out': (add, 'y', 10)}\n assert cull(d, 'out') == cull(d, ['out'])\n assert cull(d, ['out', 'z']) == d\n assert raises(KeyError, lambda: cull(d, 'badkey'))\n\n\ndef test_flatten():\n assert list(flatten(())) == []\n\n\ndef test_subs():\n assert subs((sum, [1, 'x']), 'x', 2) == (sum, [1, 2])\n assert subs((sum, [1, ['x']]), 'x', 2) == (sum, [1, [2]])\n\n\ndef test_fuse():\n assert fuse({\n 'w': (inc, 'x'),\n 'x': (inc, 'y'),\n 'y': (inc, 'z'),\n 'z': (add, 'a', 'b'),\n 'a': 1,\n 'b': 2,\n }) == {\n 'w': (inc, (inc, (inc, (add, 'a', 'b')))),\n 'a': 1,\n 'b': 2,\n }\n assert fuse({\n 'NEW': (inc, 'y'),\n 'w': (inc, 'x'),\n 'x': (inc, 'y'),\n 'y': (inc, 'z'),\n 'z': (add, 'a', 'b'),\n 'a': 1,\n 'b': 2,\n }) == {\n 'NEW': (inc, 'y'),\n 'w': (inc, (inc, 'y')),\n 'y': (inc, (add, 'a', 'b')),\n 'a': 1,\n 'b': 2,\n }\n assert fuse({\n 'v': (inc, 'y'),\n 'u': (inc, 'w'),\n 'w': (inc, 'x'),\n 'x': (inc, 'y'),\n 'y': (inc, 'z'),\n 'z': (add, 'a', 'b'),\n 'a': (inc, 'c'),\n 'b': (inc, 'd'),\n 'c': 1,\n 'd': 2,\n }) == {\n 'u': (inc, (inc, (inc, 'y'))),\n 'v': (inc, 'y'),\n 'y': (inc, (add, 'a', 'b')),\n 'a': (inc, 1),\n 'b': (inc, 2),\n }\n assert fuse({\n 'a': (inc, 'x'),\n 'b': (inc, 'x'),\n 'c': (inc, 'x'),\n 'd': (inc, 'c'),\n 'x': (inc, 'y'),\n 'y': 0,\n }) == {\n 'a': (inc, 'x'),\n 'b': (inc, 'x'),\n 'd': (inc, (inc, 'x')),\n 'x': (inc, 0),\n }\n assert fuse({\n 'a': 1,\n 'b': (inc, 'a'),\n 'c': (add, 'b', 'b')\n }) == {\n 'b': (inc, 1),\n 'c': (add, 'b', 'b')\n }\n\n", "sub_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_ext": "py", "file_size_in_byte": 4256, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "dask.core.istask", "line_number": 23, "usage_type": "call"}, {"api_name": "dask.core.istask", "line_number": 24, "usage_type": "call"}, {"api_name": "dask.core.istask", "line_number": 25, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 34, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 35, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 36, "usage_type": "call"}, {"api_name": "toolz.memoize", "line_number": 45, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 45, "usage_type": "argument"}, {"api_name": "dask.core.get", "line_number": 56, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 62, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 63, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 69, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 70, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 77, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 86, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 87, "usage_type": "call"}, {"api_name": "dask.core.get_dependencies", "line_number": 94, "usage_type": "call"}, {"api_name": "dask.core.get_dependencies", "line_number": 99, "usage_type": "call"}, {"api_name": "dask.core.get", "line_number": 107, "usage_type": "call"}, {"api_name": "dask.core.cull", "line_number": 113, "usage_type": "call"}, {"api_name": "dask.core.cull", "line_number": 115, "usage_type": "call"}, {"api_name": "dask.core.cull", "line_number": 116, "usage_type": "call"}, {"api_name": "dask.utils.raises", "line_number": 117, 
"usage_type": "call"}, {"api_name": "dask.core.cull", "line_number": 117, "usage_type": "call"}, {"api_name": "dask.core.flatten", "line_number": 121, "usage_type": "call"}, {"api_name": "dask.core.subs", "line_number": 125, "usage_type": "call"}, {"api_name": "dask.core.subs", "line_number": 126, "usage_type": "call"}, {"api_name": "dask.core.fuse", "line_number": 130, "usage_type": "call"}, {"api_name": "dask.core.fuse", "line_number": 142, "usage_type": "call"}, {"api_name": "dask.core.fuse", "line_number": 157, "usage_type": "call"}, {"api_name": "dask.core.fuse", "line_number": 175, "usage_type": "call"}, {"api_name": "dask.core.fuse", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "493898608", "text": "import logging\nfrom logging import Logger\n\n\ndef get_logger() -> Logger:\n log_level = logging.DEBUG\n\n new_logger = logging.getLogger(\"Automation Tests\")\n new_logger.setLevel(log_level)\n\n file_handler = logging.FileHandler(\"shell-tests.log\", \"w\")\n file_handler.setLevel(log_level)\n std_handler = logging.StreamHandler()\n std_handler.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\n \"%(asctime)s - %(threadName)s - %(levelname)s - %(message)s\"\n )\n std_handler.setFormatter(formatter)\n file_handler.setFormatter(formatter)\n\n new_logger.addHandler(std_handler)\n new_logger.addHandler(file_handler)\n\n return new_logger\n\n\nlogger = get_logger()\n", "sub_path": "shell_tests/helpers/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 694, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.DEBUG", "line_number": 6, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.Logger", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "418252930", "text": "\"\"\"\nParsing fasta file module.\n\"\"\"\nimport os.path\nimport collections\nimport logging\n\nfrom pypcpe2 import utility\n\n\ndef retrieve_fasta_id(id_line):\n \"\"\"\n Parse fasta first line to get the fasta id.\n\n Return:\n id (str)\n \"\"\"\n return id_line.split('|')[1]\n\n\ndef read_fasta_file(path):\n \"\"\"\n Generator function for reading fasta file.\n\n Returns:\n info: the first line of the sequences\n seq: the main body of the sequence\n \"\"\"\n id_line = \"\"\n seq = \"\"\n\n with open(path, 'r') as fin:\n for line in fin:\n if line.startswith('>'):\n if id_line and seq:\n # Yield the previous one\n yield id_line, seq\n id_line = line.strip()\n seq = \"\"\n else:\n seq += line.strip()\n\n if id_line and seq:\n # Yield the last one\n yield id_line.strip(), seq.strip()\n\n\ndef create_seq_fasta_id_file(fasta_path, seq_path, id_path):\n \"\"\"\n Parse fasta file to make a unique sequence file and a id mapping file.\n\n The seq file format is as the following\n\n [number of seqs]\n [len_0] [seq_0]\n [len_1] [seq_1]\n ...\n [len_n] [seq_n]\n\n The id file format is as the following\n\n [number of seqs]\n [size_m0] [id_0_0] [id_0_1] ... [id_0_m0]\n [size_m1] [id_1_0] [id_1_1] ... [id_1_m1]\n ...\n [size_mn] [id_n_0] [id_n_1] ... [id_n_mn]\n\n The seq file and id file is line-to-line mapping. 
It means each seq has\n one or several fasta ids at the same line number of the id file.\n\n Args:\n fasta_path (str): The input fasta file\n seq_path (str): The output sequence file.\n id_path (str): The output id mapping file.\n \"\"\"\n seq_ids = dict()\n\n for info, seq in read_fasta_file(fasta_path):\n fasta_id = retrieve_fasta_id(info)\n seq_ids.setdefault(seq, []).append(fasta_id)\n\n with open(seq_path, 'w') as f_seq, open(id_path, 'w') as f_id:\n f_seq.write(str(len(seq_ids)) + \"\\n\")\n f_id.write(str(len(seq_ids)) + \"\\n\")\n\n # Make sure the two files are a line-to-line mapping; write the seq and\n # ids at the same time.\n for seq, ids in seq_ids.items():\n f_seq.write(\" \".join([str(len(seq)), seq]) + \"\\n\")\n\n write_ids = [len(ids)] + ids\n f_id.write(\" \".join([str(i) for i in write_ids]) + \"\\n\")\n\n logging.info(\"Parse {path} - {seq_size} distinct sequence(s)\".format(\n path=fasta_path, seq_size=len(seq_ids)))\n\n\ndef create_fasta_id_info_file(fasta_path, info_path):\n \"\"\"\n Parse fasta file to make a fasta id information file.\n\n The output file format is as follows.\n\n [FASTA ID_0] [INFO_0]\n [FASTA ID_1] [INFO_1]\n ...\n [FASTA ID_n] [INFO_n]\n\n Args:\n fasta_path (str): The input fasta file\n info_path (str): The output fasta information file.\n \"\"\"\n info_count = 0\n with open(info_path, 'w') as fout:\n for info, _ in read_fasta_file(fasta_path):\n fasta_id = retrieve_fasta_id(info)\n\n fout.write(\" \".join([fasta_id, info]) + \"\\n\")\n info_count = info_count + 1\n\n logging.info(\"Parse {path} - {size} fasta id(s)\".format(\n path=fasta_path, size=info_count))\n\n\nclass FastaSeqPath(object):\n \"\"\"\n Present all file paths for the fasta file.\n \"\"\"\n def __init__(self, raw_path, *, seq_path=None,\n fasta_id_path=None, fasta_id_info_path=None):\n \"\"\"\n Init the object.\n\n The object records all path information. It would create the related\n data structure.\n\n If the argument is None, the object will have a default path. The path\n is under the temp folder. If you have these files, please specify them.\n \"\"\"\n self._raw_path = os.path.abspath(raw_path)\n name = utility.retrieve_basename(self._raw_path)\n\n get_valid_path = lambda default_path, arg_path: \\\n os.path.abspath(arg_path) if arg_path is not None else default_path\n\n self._seq_path = get_valid_path(\n utility.make_temp_path(name + \"_seq.txt\"), seq_path)\n self._fasta_id_path = get_valid_path(\n utility.make_temp_path(name + \"_id.txt\"), fasta_id_path)\n self._fasta_id_info_path = get_valid_path(\n utility.make_temp_path(name + \"_id_info.txt\"), fasta_id_info_path)\n\n @property\n def raw_path(self):\n return self._raw_path\n\n @property\n def seq_path(self):\n return self._seq_path\n\n @property\n def fasta_id_path(self):\n return self._fasta_id_path\n\n @property\n def fasta_id_info_path(self):\n return self._fasta_id_info_path\n\n\nSequence = collections.namedtuple('Sequence', ['raw_seq', 'fasta_ids'])\n\n\nclass Sequences(collections.abc.Mapping):\n \"\"\"\n Present each sequence's info. 
A dict's wrapper\n key (int) presents the sequence id\n value (Sequence) presents the sequence information\n raw_seq presents the original sequence\n ids present the fasta ids of the sequence\n \"\"\"\n def __init__(self, seqs):\n \"\"\"\n Users cannot call the init function directly.\n \"\"\"\n self._seqs = seqs\n\n def __contains__(self, sid):\n return sid in self._seqs\n\n def __getitem__(self, sid):\n return self._seqs[sid]\n\n def __iter__(self):\n return iter(self._seqs)\n\n def __len__(self):\n return len(self._seqs)\n\n @staticmethod\n def read_file(seq_path, fasta_id_path, sequence_ids=None):\n \"\"\"\n Create a dict to save the sequence and fasta ids.\n\n It's possible for a unique sequence to map to several fasta ids.\n\n Args:\n seq_path (str): the input sequence file path\n id_path (str): the input ids file path\n sequence_ids ([int]): a list of sequence ids.\n If the parameter is None, it saves all sequences from\n these files. Otherwise it only saves sequences whose sequence\n ids appear in the list.\n\n Return:\n A Sequences object\n \"\"\"\n seqs = dict()\n\n with open(seq_path, 'r') as fseq, open(fasta_id_path, 'r') as fid:\n seq_lines = fseq.read().splitlines()[1:]\n id_lines = fid.read().splitlines()[1:]\n\n for sid, (seq_line, ids_line) in enumerate(\n zip(seq_lines, id_lines)):\n if sequence_ids is None or sid in sequence_ids:\n raw_seq = seq_line.split()[1]\n fasta_ids = ids_line.split()[1:]\n seqs[sid] = Sequence(raw_seq, fasta_ids)\n\n return Sequences(seqs)\n\n\nclass FastaIDInfos(collections.abc.Mapping):\n \"\"\"\n Present each fasta id's information. A dict's wrapper.\n key (str) presents fasta id.\n value (str) presents fasta information.\n \"\"\"\n def __init__(self, id_info):\n \"\"\"\n Users cannot call the init function directly.\n \"\"\"\n self._id_info = id_info\n\n def __contains__(self, sid):\n return sid in self._id_info\n\n def __getitem__(self, sid):\n return self._id_info[sid]\n\n def __iter__(self):\n return iter(self._id_info)\n\n def __len__(self):\n return len(self._id_info)\n\n @staticmethod\n def read_file(path, fasta_ids=None):\n \"\"\"\n Create a dictionary to save the information for each fasta id.\n\n Args:\n id_info_path (str): the input file path\n fasta_ids ([str]): a list of fasta ids.\n If the parameter is None, it saves all fasta id information from\n the file. Otherwise it only saves information whose fasta ids\n appear in the list.\n\n Return:\n a FastaIDInfos object.\n \"\"\"\n id_info = dict()\n with open(path, 'r') as fin:\n for line in fin:\n words = line.split()\n\n fasta_id = words[0]\n fasta_info = \" \".join(words[1:])\n\n if fasta_ids is None or fasta_id in fasta_ids:\n id_info[fasta_id] = fasta_info\n\n return FastaIDInfos(id_info)\n\n\nclass FastaSeq(object):\n \"\"\"\n A small helper class to collect two major fasta data structures.\n \"\"\"\n def __init__(self, fasta_seq_path, *, sequence_ids=None):\n \"\"\"\n Init two fasta data structures by reading files.\n\n Args:\n fasta_seq_path (FastaSeqPath): the input file paths\n sequence_ids ([int]): a list of sequence ids.\n If the parameter is None, it saves all sequences from\n these files. 
Otherwise it only saves sequences whose sequence\n ids appear in the list.\n \"\"\"\n self._seqs = Sequences.read_file(fasta_seq_path.seq_path,\n fasta_seq_path.fasta_id_path,\n sequence_ids)\n fasta_ids = None\n if sequence_ids is not None:\n fasta_ids = {fasta_id\n for seq in self._seqs.values()\n for fasta_id in seq.fasta_ids}\n\n self._fasta_id_infos = FastaIDInfos.read_file(\n fasta_seq_path.fasta_id_info_path, fasta_ids)\n\n @property\n def seqs(self):\n return self._seqs\n\n @property\n def fasta_id_infos(self):\n return self._fasta_id_infos\n\n\ndef parse_fasta_file(path):\n \"\"\"\n Parse the input fasta file to create related data structures.\n\n Args:\n fasta_path (str): the input fasta file path\n\n Return:\n a FastaSeqPath object pointing out the paths which contain the parsed\n information for the input fasta file.\n \"\"\"\n fasta_seq_path = FastaSeqPath(path)\n\n create_seq_fasta_id_file(fasta_seq_path.raw_path,\n fasta_seq_path.seq_path,\n fasta_seq_path.fasta_id_path)\n create_fasta_id_info_file(fasta_seq_path.raw_path,\n fasta_seq_path.fasta_id_info_path)\n\n return fasta_seq_path\n", "sub_path": "pypcpe2/read_fasta.py", "file_name": "read_fasta.py", "file_ext": "py", "file_size_in_byte": 10105, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.info", "line_number": 94, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.path.abspath", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 140, "usage_type": "name"}, {"api_name": "pypcpe2.utility.retrieve_basename", "line_number": 141, "usage_type": "call"}, {"api_name": "pypcpe2.utility", "line_number": 141, "usage_type": "name"}, {"api_name": "os.path.path.abspath", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 144, "usage_type": "name"}, {"api_name": "pypcpe2.utility.make_temp_path", "line_number": 147, "usage_type": "call"}, {"api_name": "pypcpe2.utility", "line_number": 147, "usage_type": "name"}, {"api_name": "pypcpe2.utility.make_temp_path", "line_number": 149, "usage_type": "call"}, {"api_name": "pypcpe2.utility", "line_number": 149, "usage_type": "name"}, {"api_name": "pypcpe2.utility.make_temp_path", "line_number": 151, "usage_type": "call"}, {"api_name": "pypcpe2.utility", "line_number": 151, "usage_type": "name"}, {"api_name": "collections.namedtuple", "line_number": 170, "usage_type": "call"}, {"api_name": "collections.abc", "line_number": 173, "usage_type": "attribute"}, {"api_name": "collections.abc", "line_number": 233, "usage_type": "attribute"}]} +{"seq_id": "440842924", "text": "import unittest\nimport json\n\nfrom flask_api import status\n\nfrom api import db\nfrom database.models.users import User\nfrom database.managers.user_manager import add_user\nfrom core.messages import error_messages as error\nfrom api.tests.base import BaseTestCase\nfrom api.tests.auth_tests.base_auth import user_registration, user_status\nfrom api.tests.auth_tests.base_auth import user_login\n\n\nclass TestUserStatus(BaseTestCase):\n    def test_user_status(self):\n        \"\"\"AUTH-TEST:>>>> Test for user status \\n \"\"\"\n        with self.client:\n            user_info = {'first_name': 'test_name',\n                         'last_name': 'tes_last',\n                         'username': 'joe',\n                         'email': 'joe@gmail.com',\n                         'is_active': 
True,\n 'is_admin': False,\n 'password': 'test'}\n response_register = user_registration(self, user_info)\n response = user_status(self, response_register)\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['data'] is not None)\n self.assertTrue(data['data']['username'] == 'joe')\n self.assertTrue(data['data']['email'] == 'joe@gmail.com')\n self.assertIn(data['data']['is_admin'], (True, False))\n self.assertIn(data['data']['is_active'], (True, False))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_invalid_status_inactive(self):\n user_info = {'first_name': 'test_name',\n 'last_name': 'tes_last',\n 'username': 'joe',\n 'email': 'joe@gmail.com',\n 'is_active': False,\n 'is_admin': False,\n 'password': 'test'}\n add_user(user_info)\n # update user\n user = User.query.filter_by(email='joe@gmail.com').first()\n user.is_active = False\n db.session.commit()\n with self.client:\n response_login = user_login(self, 'joe@gmail.com', '123456')\n data = json.loads(response_login.data.decode())\n print(data['status'])\n print(data['message'])\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message']\n == error.USER_INACTIVE)\n self.assertTrue(response_login.content_type == 'application/json')\n self.assertEqual(response_login.status_code,\n status.HTTP_202_ACCEPTED)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "api/tests/auth_tests/test_user_status.py", "file_name": "test_user_status.py", "file_ext": "py", "file_size_in_byte": 2659, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "api.tests.base.BaseTestCase", "line_number": 15, "usage_type": "name"}, {"api_name": "api.tests.auth_tests.base_auth.user_registration", "line_number": 26, "usage_type": "call"}, {"api_name": "api.tests.auth_tests.base_auth.user_status", "line_number": 27, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "flask_api.status.HTTP_200_OK", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask_api.status", "line_number": 35, "usage_type": "name"}, {"api_name": "database.managers.user_manager.add_user", "line_number": 45, "usage_type": "call"}, {"api_name": "database.models.users.User.query.filter_by", "line_number": 47, "usage_type": "call"}, {"api_name": "database.models.users.User.query", "line_number": 47, "usage_type": "attribute"}, {"api_name": "database.models.users.User", "line_number": 47, "usage_type": "name"}, {"api_name": "api.db.session.commit", "line_number": 49, "usage_type": "call"}, {"api_name": "api.db.session", "line_number": 49, "usage_type": "attribute"}, {"api_name": "api.db", "line_number": 49, "usage_type": "name"}, {"api_name": "api.tests.auth_tests.base_auth.user_login", "line_number": 51, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 52, "usage_type": "call"}, {"api_name": "core.messages.error_messages.USER_INACTIVE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "core.messages.error_messages", "line_number": 57, "usage_type": "name"}, {"api_name": "flask_api.status.HTTP_202_ACCEPTED", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask_api.status", "line_number": 60, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "223228295", "text": "# selenium for python\r\nfrom selenium import webdriver\r\n\r\n# IR-NET proxy 
set\r\nproxy = \"proxy.gateway-net.mizuho-ir.co.jp:10080\"\r\nwebdriver.DesiredCapabilities.INTERNETEXPLORER['proxy'] = {\r\n \"httpProxy\":proxy,\r\n \"ftpProxy\":None,\r\n \"sslProxy\":proxy,\r\n \"noProxy\":None,\r\n \"proxyType\":\"MANUAL\",\r\n \"class\":\"org.openqa.selenium.Proxy\",\r\n \"autodetect\":False\r\n}\r\n\r\ndriver = webdriver.Ie(\"./driver-3.12.0/IEDriverServer.exe\")\r\ndriver.get(\"https://www.compas.mizuhofg.co.jp/compas/index.jsp\")\r\n\r\n\r\n# assert \"Python\" in driver.title\r\n# elem = driver.find_element_by_name(\"q\")\r\n# elem.clear()\r\n# elem.send_keys(\"pycon\")\r\n# elem.send_keys(Keys.RETURN)\r\n# assert \"No results found.\" not in driver.page_source\r\ndriver.find_element_by_name(\"00業務メニュー\").click()\r\n\r\ndriver.close()\r\n\r\n", "sub_path": "gyomeOpe.py", "file_name": "gyomeOpe.py", "file_ext": "py", "file_size_in_byte": 795, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "selenium.webdriver.DesiredCapabilities", "line_number": 6, "usage_type": "attribute"}, {"api_name": "selenium.webdriver", "line_number": 6, "usage_type": "name"}, {"api_name": "selenium.webdriver.Ie", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "194534902", "text": "import pygame\nimport random\nfrom EnzymaniaClasses import Drawable, Enzyme, Metabolite\n\nRUNNING = True\ndef newDrawable(x=None, y=None, angle=None):\n d = Drawable()\n d.x = x if x is not None else random.randint(1, 600)\n d.xsize = random.randint(10, 30)\n d.y = y if y is not None else random.randint(1, 800)\n d.ysize = random.randint(10, 30)\n d.yvel = -1\n d.xvel = -1\n if angle is None:\n d.angle = random.randint(0, 180)\n else:\n d.angle = angle\n print(\"new\", d)\n print(d.yvel, d.xvel, d.angle)\n return d\n\ndef newPaddle(x = None, y = None ):\n d = Drawable()\n d.x = x\n d.xsize = 10\n d.y = y\n d.ysize = 30\n d.yvel = 1\n d.xvel = 0\n print(\"new\", d)\n print(d.yvel, d.xvel, d.angle)\n return d\n\n\ndef newBall(x = None, y = None ):\n d = Drawable()\n d.x = x\n d.xsize = 10\n d.y = y\n d.ysize = 30\n d.yvel = float(random.randint(0,2))\n d.xvel = float(random.randint(0,2))\n print(\"new\", d)\n print(d.yvel, d.xvel, d.angle)\n return d\n\ndef checkEvents(entityList):\n global RUNNING\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n #print(event)\n if event.key == pygame.K_ESCAPE:\n print(\"escape\")\n RUNNING = False\n pygame.event.post(pygame.event.Event(pygame.QUIT))\n elif event.key == pygame.K_n:\n print(\"n pressed\")\n entityList.append(newDrawable())\n elif event.key == pygame.K_p:\n print(\"p pressed\")\n mouseX, mouseY = pygame.mouse.get_pos()\n entityList.append(newPaddle(x=mouseX, y=mouseY))\n elif event.key == pygame.K_z:\n mouseX, mouseY = pygame.mouse.get_pos()\n entityList.append(newDrawable(x=mouseX, y=mouseY))\n elif event.key == pygame.K_b:\n mouseX, mouseY = pygame.mouse.get_pos()\n entityList.append(newBall(x=mouseX, y=mouseY))\n elif event.key == pygame.K_s:\n mouseX, mouseY = pygame.mouse.get_pos()\n elif event.key == pygame.K_e:\n mouseX, mouseY = pygame.mouse.get_pos()\n entityList.append(Enzyme(x=mouseX, y=mouseY))\n elif event.key == pygame.K_m:\n mouseX, mouseY = pygame.mouse.get_pos()\n entityList.append(Metabolite(x=mouseX, y=mouseY))\n # print(\"r pressed\")\n # entityList.append(newMetab())\n #elif event.key == pygame.K_m:\n # for e in entityList:\n # e.move()\n #if isinstance(e,Enzyme):\n #\te.move(x = -1* random.randint(0,15), 
y=-1*random.randint(5,10))\n #e.move(x = random.randint(0,15),y = random.randint(0,5))\n if event.type == pygame.QUIT:\n RUNNING = False\n\n\nWIDTH = 800\nHEIGHT = 600\n\ndef main():\n pygame.init()\n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.mouse.set_visible(1)\n clock = pygame.time.Clock()\n global RUNNING\n RUNNING = True\n entityList = []\n while RUNNING:\n clock.tick(60)\n checkEvents(entityList)\n screen.fill((0, 0, 0))\n screen.blit(screen, (0, 0))\n for i, e in enumerate(entityList):\n e.checkCollisionList(entityList[:i])\n bounceOff(e)\n e.angle+=0.1\n if isOutOfSight(e):\n entityList.remove(e)\n e.move()\n e.draw(screen)\n pygame.display.flip()\n\n\ndef bounceOff(entity):\n if entity.x+entity.xsize > WIDTH or entity.x <0:\n if entity.xvel > 0:\n entity.xvel += 0\n else:\n entity.xvel -=0\n entity.xvel *=-1\n entity.move()\n\n if entity.y+entity.ysize > HEIGHT or entity.y < 0:\n if entity.yvel > 0:\n entity.yvel += 0\n else:\n entity.yvel -= 0\n entity.yvel *= -1\n entity.move()\n\n\ndef isOutOfSight(entity):\n out = False\n if entity.x > WIDTH or entity.x < 0 or entity.y > HEIGHT or entity.y < 0:\n out = True\n return out\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "Enzymania/enzymania/sandbox/t0.py", "file_name": "t0.py", "file_ext": "py", "file_size_in_byte": 4118, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "random.randint", "line_number": 6, "usage_type": "call"}, {"api_name": "EnzymaniaClasses.Drawable", "line_number": 7, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 9, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 11, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 15, "usage_type": "call"}, {"api_name": "EnzymaniaClasses.Drawable", "line_number": 23, "usage_type": "call"}, {"api_name": "EnzymaniaClasses.Drawable", "line_number": 36, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.event.post", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.event.Event", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.QUIT", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.K_n", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.K_p", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.K_z", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.K_b", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 69, 
"usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.K_e", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 72, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 72, "usage_type": "attribute"}, {"api_name": "EnzymaniaClasses.Enzyme", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.K_m", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 75, "usage_type": "attribute"}, {"api_name": "EnzymaniaClasses.Metabolite", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.QUIT", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.mouse.set_visible", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 113, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 113, "usage_type": "attribute"}]} +{"seq_id": "344914219", "text": "\nfrom pyspark.sql import SparkSession\n\nlogFile = \"word_count.text\" # Should be some file on your system\nspark = SparkSession.builder.appName(\"DocNotes1\").getOrCreate()\nlogData = spark.read.text(logFile)\n\nnumAs = logData.filter(logData.value.contains('a')).count()\nnumBs = logData.filter(logData.value.contains('b')).count()\nnum = logData.count() #counting lines\nprint(\"Lines with a: %i, lines with b: %i ,count %i\" % (numAs, numBs,num))\n\nspark.stop()", "sub_path": "preprocessing/docNotes1.py", "file_name": "docNotes1.py", "file_ext": "py", "file_size_in_byte": 451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 5, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "4763843", "text": "from __future__ import absolute_import\nfrom django.test import TestCase\nfrom django_find.parsers.query import QueryParser\n\nname_map = {'host': 'Device.metadata_id',\n 'model': 'Device.model',\n 'interface': 'Unit.interface'}\n\nquery1 = 'host:^test (model:foo or interface:bar)'\nexpected_dom1 = \"\"\"Group(root)\n Term: Device.metadata_id startswith 'test'\n Or\n Term: Device.model contains 'foo'\n Term: Unit.interface contains 'bar'\"\"\"\n\nquery2 = 'test (model:foo or interface:bar$)'\nexpected_dom2 = \"\"\"Group(root)\n Or\n Term: Device.metadata_id contains 'test'\n Term: Device.model contains 'test'\n Or\n Term: Device.model contains 'foo'\n Term: Unit.interface endswith 'bar'\"\"\"\n\nclass QueryParserTest(TestCase):\n def setUp(self):\n self.maxDiff = None\n self.parser = QueryParser(name_map, ('host', 'model'))\n\n def testParser(self):\n dom = self.parser.parse(query1)\n self.assertEqual(expected_dom1, 
dom.dump())\n\n dom = self.parser.parse(query2)\n self.assertEqual(expected_dom2, dom.dump())\n", "sub_path": "tests/parsers/test_query.py", "file_name": "test_query.py", "file_ext": "py", "file_size_in_byte": 1070, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.test.TestCase", "line_number": 25, "usage_type": "name"}, {"api_name": "django_find.parsers.query.QueryParser", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "395347298", "text": "import argparse\nimport csv\nimport logging\n\nimport helpers\nfrom base import run_command\nfrom commands import save_json_file\nfrom gcp_utils.projects import (\n get_json_list_projects,\n check_project_exists\n)\nfrom parser_arguments import add_simulation_arguments\nfrom gcp_utils.service_accounts import (\n use_service_account,\n remove_from_police,\n set_police_on_organization,\n get_policies_organization,\n get_project_number,\n get_project_name\n)\n\nservices_accounts_csv_file = \"services_accounts.csv\"\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef create_cleanup_services_accounts_parser():\n \"\"\"\n CLI cleanup project parser\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Run setup on projects of a organization.')\n\n parser.add_argument(\n '-k',\n '--key_file',\n help='Service Account Key File',\n dest='key_file')\n\n parser.add_argument(\n '-o',\n '--organization_id',\n help='Organization ID',\n dest='organization_id',\n required=True)\n\n parser.add_argument(\n '-q',\n '--quiet',\n help='Disable all interactive prompts when running',\n dest='quiet',\n action='store_true')\n\n parser.set_defaults(dry_run=True)\n\n return parser\n\n\ndef get_services_accounts(organization_id):\n \"\"\"\n Get services accounts\n :param organization_id: organization\n \"\"\"\n LOGGER.info(\"Getting services accounts\")\n data = run_command([\n 'gcloud', 'organizations', 'get-iam-policy', organization_id,\n \"--flatten='bindings[]'\",\n \"--format='csv(bindings.members)'\"\n ])\n if not helpers.DRY_RUN:\n with open(services_accounts_csv_file, 'wb') as output:\n output.write(data)\n else:\n with open(services_accounts_csv_file, 'wb') as output:\n output.write(b'deployer@control.iam.gserviceaccount.com')\n\n\ndef check_services_accounts_to_delete(organization_id, \n original_policies_file, \n new_policies_file):\n \"\"\"\n Check services accounts to delete.\n :param organization_id: organization id\n :param original_policies_file: original policies\n :param new_policies_file: new policies\n \"\"\"\n with open(services_accounts_csv_file, 'rt') as f:\n reader = csv.reader(f, delimiter=',')\n members = list(reader)\n\n policies = get_policies_organization(organization_id)\n save_json_file(original_policies_file, policies)\n LOGGER.info(\"Backup police file saved as: \" + original_policies_file)\n\n projects_list = get_json_list_projects(\"\", \"\", organization_id)\n\n for group in members:\n group_members = group[0].split(';')\n for member in group_members:\n if member.startswith('serviceAccount'):\n project_name = get_project_name(member)\n project_number = get_project_number(member)\n if (not check_project_exists(project_name, project_number,\n projects_list)):\n LOGGER.info(\"Deleting \" + member)\n remove_from_police(member, policies)\n\n save_json_file(new_policies_file, policies)\n LOGGER.info(\"New police file saved as: \" + new_policies_file)\n\n\ndef clean_services_account(organization_id):\n \"\"\"\n Remove permissions from services account of 
projects that are not visible\n or have been deleted. This method is destructive. Use with caution.\n :param organization_id: organization id\n \"\"\"\n print(\n \"Cleanup old services accounts to organization \" + str(organization_id))\n original_policies_file = \"original_organization_policies.json\"\n new_policies_file = \"new_organization_policies_file.json\"\n\n get_services_accounts(organization_id)\n check_services_accounts_to_delete(organization_id, original_policies_file,\n new_policies_file)\n set_police_on_organization(organization_id, new_policies_file)\n\n\ndef main_cleanup_services_accounts():\n \"\"\"\n CLI entry point.\n \"\"\"\n parser = create_cleanup_services_accounts_parser()\n add_simulation_arguments(parser)\n args = parser.parse_args()\n helpers.DRY_RUN = args.dry_run\n\n if args.key_file:\n use_service_account(args.key_file)\n\n clean_services_account(args.organization_id)\n\n\nif __name__ == '__main__':\n main_cleanup_services_accounts()", "sub_path": "securitycenter/setup-scripts/clean_staled_service_accounts.py", "file_name": "clean_staled_service_accounts.py", "file_ext": "py", "file_size_in_byte": 4446, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 31, "usage_type": "call"}, {"api_name": "base.run_command", "line_number": 65, "usage_type": "call"}, {"api_name": "helpers.DRY_RUN", "line_number": 70, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 88, "usage_type": "call"}, {"api_name": "gcp_utils.service_accounts.get_policies_organization", "line_number": 91, "usage_type": "call"}, {"api_name": "commands.save_json_file", "line_number": 92, "usage_type": "call"}, {"api_name": "gcp_utils.projects.get_json_list_projects", "line_number": 95, "usage_type": "call"}, {"api_name": "gcp_utils.service_accounts.get_project_name", "line_number": 101, "usage_type": "call"}, {"api_name": "gcp_utils.service_accounts.get_project_number", "line_number": 102, "usage_type": "call"}, {"api_name": "gcp_utils.projects.check_project_exists", "line_number": 103, "usage_type": "call"}, {"api_name": "gcp_utils.service_accounts.remove_from_police", "line_number": 106, "usage_type": "call"}, {"api_name": "commands.save_json_file", "line_number": 108, "usage_type": "call"}, {"api_name": "gcp_utils.service_accounts.set_police_on_organization", "line_number": 126, "usage_type": "call"}, {"api_name": "parser_arguments.add_simulation_arguments", "line_number": 134, "usage_type": "call"}, {"api_name": "helpers.DRY_RUN", "line_number": 136, "usage_type": "attribute"}, {"api_name": "gcp_utils.service_accounts.use_service_account", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "257243385", "text": "import os\nimport json\nimport logging\nimport socket\nimport sys\nimport time\nfrom enum import Enum\n\nimport requests\nfrom netaddr import IPNetwork\n\nfrom netifaces import AF_INET, ifaddresses, interfaces\n\nfrom ...core.events import handler\nfrom ...core.events.common import Event, NewHostEvent, Vulnerability\nfrom ...core.types import Discovery, InformationDisclosure, Azure\n\nclass HostScanEvent(Event):\n def __init__(self, pod=False, active=False, predefined_hosts=list()):\n self.active = active # flag to specify whether to get actual data from vulnerabilities\n self.predefined_hosts = predefined_hosts\n\nclass HostDiscoveryHelpers:\n @staticmethod\n def 
get_cloud(host):\n try:\n logging.debug(\"Checking whether the cluster is deployed on azure's cloud\")\n metadata = requests.get(\"http://www.azurespeed.com/api/region?ipOrUrl={ip}\".format(ip=host)).text\n except requests.ConnectionError as e:\n logging.info(\"- unable to check cloud: {0}\".format(e))\n return\n if \"cloud\" in metadata:\n return json.loads(metadata)[\"cloud\"]\n\n # generator, generating a subnet by given a cidr\n @staticmethod\n def generate_subnet(ip, sn=\"24\"):\n logging.debug(\"HostDiscoveryHelpers.generate_subnet {0}/{1}\".format(ip, sn))\n subnet = IPNetwork('{ip}/{sn}'.format(ip=ip, sn=sn))\n for ip in IPNetwork(subnet):\n logging.debug(\"HostDiscoveryHelpers.generate_subnet yielding {0}\".format(ip))\n yield ip\n\n\n@handler.subscribe(HostScanEvent)\nclass HostDiscovery(Discovery):\n def __init__(self, event):\n self.event = event\n\n def execute(self):\n self.scan_interfaces()\n \n def scan_interfaces(self):\n try:\n logging.debug(\"HostDiscovery hunter attempting to get external IP address\")\n external_ip = requests.get(\"http://canhazip.com\").text # getting external ip, to determine if cloud cluster\n except requests.ConnectionError as e:\n logging.debug(\"unable to determine local IP address: {0}\".format(e))\n logging.info(\"~ default to 127.0.0.1\")\n external_ip = \"127.0.0.1\"\n cloud = HostDiscoveryHelpers.get_cloud(external_ip)\n for ip in self.generate_interfaces_subnet():\n handler.publish_event(NewHostEvent(host=ip, cloud=cloud))\n\n def generate_interfaces_subnet(self, sn='24'):\n for ifaceName in interfaces():\n for ip in [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [])]:\n if not self.event.localhost and InterfaceTypes.LOCALHOST.value in ip.__str__():\n continue\n for ip in HostDiscoveryHelpers.generate_subnet(ip, sn):\n yield ip\n \nclass InterfaceTypes(Enum):\n LOCALHOST = \"127\"\n", "sub_path": "src/modules/discovery/hosts.py", "file_name": "hosts.py", "file_ext": "py", "file_size_in_byte": 2829, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "core.events.common.Event", "line_number": 18, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 29, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 30, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 38, "usage_type": "call"}, {"api_name": "netaddr.IPNetwork", "line_number": 39, "usage_type": "call"}, {"api_name": "netaddr.IPNetwork", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 41, "usage_type": "call"}, {"api_name": "core.types.Discovery", "line_number": 46, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 55, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 56, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 57, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 59, "usage_type": "call"}, {"api_name": "core.events.handler.publish_event", "line_number": 63, "usage_type": "call"}, {"api_name": "core.events.handler", "line_number": 63, "usage_type": "name"}, {"api_name": "core.events.common.NewHostEvent", "line_number": 63, 
"usage_type": "call"}, {"api_name": "netifaces.interfaces", "line_number": 66, "usage_type": "call"}, {"api_name": "netifaces.AF_INET", "line_number": 67, "usage_type": "argument"}, {"api_name": "netifaces.ifaddresses", "line_number": 67, "usage_type": "call"}, {"api_name": "core.events.handler.subscribe", "line_number": 45, "usage_type": "call"}, {"api_name": "core.events.handler", "line_number": 45, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "587952521", "text": "# users/views.py\nfrom django.views import generic\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom .models import CustomUser, Userpost, Blog, Expenses\nfrom .forms import CustomUserCreationForm, PostForm, BlogForm, ExpenseForm\n\n\nclass SignUp(generic.CreateView):\n form_class = CustomUserCreationForm\n success_url = '/users/login'\n template_name = 'signup.html'\n\ndef birthday(request):\n users = CustomUser.objects.all()\n context = {\n 'users': users\n }\n if (request.user.is_authenticated):\n return render(request, 'birthdays.html', context=context)\n else:\n return redirect('login')\n\ndef details(request):\n details = CustomUser.objects.all()\n context = {\n 'details': details\n }\n if (request.user.is_authenticated):\n return render(request, 'details.html', context=context)\n else:\n return redirect('login')\n\ndef detaildisplay(request,customuser_id):\n details = get_object_or_404(CustomUser, pk=customuser_id)\n context = {\n 'details': details\n }\n if (request.user.is_authenticated):\n return render(request, 'detaildisplay.html', context=context)\n else:\n return redirect('login')\n\n\nclass userposts(generic.CreateView):\n form_class = PostForm\n template_name = 'posts.html'\n success_url = '/users/leaveapplications'\n\ndef blogcreate(request):\n if request.method == \"POST\":\n form = BlogForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.blogauthor = request.user\n post.save()\n return redirect('/users/blogtitles', pk=post.pk)\n else:\n form = BlogForm()\n return render(request, 'blogcreate.html', {'form': form})\n\n\ndef leaveapplications(request):\n leaves = Userpost.objects.order_by('created_date')\n context = {\n 'leaves': leaves\n }\n if (request.user.is_authenticated):\n return render(request, 'Leaveapplications.html', context=context)\n else:\n return redirect('login')\n\ndef conformation(request):\n return render(request, 'conformation.html')\n\n\n\ndef blogdisplay(request, blog_id):\n blog = get_object_or_404(Blog, pk=blog_id)\n context = {\n 'blog': blog\n }\n if (request.user.is_authenticated):\n return render(request, 'blogdisplay.html', context=context)\n else:\n return redirect('login')\n\ndef bloglist(request):\n blog = Blog.objects.order_by('-created_date')[:]\n context = {\n 'blog': blog\n }\n if (request.user.is_authenticated):\n return render(request, 'blogtitle.html', context=context)\n else:\n return redirect('login')\n\ndef expenses(request):\n if request.method == \"POST\":\n form = ExpenseForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.expenseauthor = request.user\n post.save()\n return redirect('/users/expensetitles', pk=post.pk)\n else:\n form = ExpenseForm()\n return render(request, 'expenses.html', {'form': form})\n\ndef expensetitles(request):\n exp = Expenses.objects.order_by('-created_date')[:]\n context = {\n 'exp': exp\n }\n if (request.user.is_authenticated):\n return render(request, 'expensetitle.html', context=context)\n else:\n 
return redirect('login')\n\ndef expensedisplay(request, expenses_id):\n exp = get_object_or_404(Expenses, pk=expenses_id)\n context = {\n 'exp': exp\n }\n if (request.user.is_authenticated):\n return render(request, 'expensedisplay.html', context=context)\n else:\n return redirect('login')\n", "sub_path": "users/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3636, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.views.generic.CreateView", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 8, "usage_type": "name"}, {"api_name": "forms.CustomUserCreationForm", "line_number": 9, "usage_type": "name"}, {"api_name": "models.CustomUser.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "models.CustomUser.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.CustomUser", "line_number": 14, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 21, "usage_type": "call"}, {"api_name": "models.CustomUser.objects.all", "line_number": 24, "usage_type": "call"}, {"api_name": "models.CustomUser.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.CustomUser", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 34, "usage_type": "call"}, {"api_name": "models.CustomUser", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 41, "usage_type": "call"}, {"api_name": "django.views.generic.CreateView", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 44, "usage_type": "name"}, {"api_name": "forms.PostForm", "line_number": 45, "usage_type": "name"}, {"api_name": "forms.BlogForm", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 56, "usage_type": "call"}, {"api_name": "forms.BlogForm", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Userpost.objects.order_by", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Userpost.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.Userpost", "line_number": 63, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 68, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 73, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Blog", "line_number": 78, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 85, "usage_type": "call"}, {"api_name": "models.Blog.objects.order_by", "line_number": 88, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 88, 
"usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 93, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "forms.ExpenseForm", "line_number": 99, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 104, "usage_type": "call"}, {"api_name": "forms.ExpenseForm", "line_number": 106, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 107, "usage_type": "call"}, {"api_name": "models.Expenses.objects.order_by", "line_number": 110, "usage_type": "call"}, {"api_name": "models.Expenses.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.Expenses", "line_number": 110, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 115, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 117, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Expenses", "line_number": 120, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 125, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "101585454", "text": "import numpy as np\nimport cv2\n\nimport sys\nsys.path.append(\"kong_util\")\nfrom kong_util.build_dataset_combine import Check_dir_exist_and_build\nfrom flow_bm_util import check_flow_quality_then_I_w_F_to_R\nfrom kong_util.matplot_fig_ax_util import Matplot_single_row_imgs\n\nimport matplotlib.pyplot as plt\nfrom step08_b_use_G_generate_0_util import F_01_or_C_01_method1_visual_op, Value_Range_Postprocess_to_01\n\n######################################################################################################################################################################################################\ndef I_Generate_F(model_obj, _1, in_img_pre, _3, _4, use_gt_range, training=False): ### training 這個參數是為了 一開使 用BN ,為了那些exp 還能重現所以才保留,現在用 IN 完全不會使用到他這樣子拉~\n flow_pre = model_obj.generator(in_img_pre, training=training)\n flow_pre = flow_pre[0].numpy()\n flow = Value_Range_Postprocess_to_01(flow_pre, use_gt_range)\n return flow\n\ndef I_Gen_F_basic_data(model_obj, in_img, in_img_pre, gt_flow, rec_hope, exp_obj=None, training=True, bgr2rgb=True):\n '''\n bgr2rgb: tf2 讀出來是 rgb, 但 cv2 存圖是bgr, 所以此狀況記得要轉一下ch 把 bgr2rgb設True!\n 但 plt 存圖是rgb, 所以存圖不用轉ch, 把 bgr2rgb設False喔!\n '''\n in_img = in_img[0].numpy() ### HWC 和 tensor -> numpy\n flow = I_Generate_F(model_obj, None, in_img_pre, None, None, exp_obj.use_gt_range, training=training)\n Cx_visual = (flow[..., 2:3] * 255).astype(np.uint8)\n Cy_visual = (flow[..., 1:2] * 255).astype(np.uint8)\n M_visual = (flow[..., 0:1] * 255).astype(np.uint8)\n gt_flow = gt_flow[0].numpy() ### HWC 和 tensor -> numpy\n Cxgt_visual = (gt_flow[..., 2:3] * 255).astype(np.uint8)\n Cygt_visual = (gt_flow[..., 1:2] * 255).astype(np.uint8)\n Mgt_visual = (gt_flow[..., 0:1] * 255).astype(np.uint8)\n rec_hope = rec_hope[0].numpy()\n\n flow_visual = F_01_or_C_01_method1_visual_op(flow)[:, :, ::-1] ### cv2 處理完 是 bgr, 但這裡都是用 tf2 rgb的角度來處理, 所以就模擬一下 轉乘 tf2 的rgb囉!\n gt_flow_visual = F_01_or_C_01_method1_visual_op(gt_flow)[:, :, ::-1] ### cv2 處理完 是 bgr, 但這裡都是用 tf2 rgb的角度來處理, 所以就模擬一下 轉乘 tf2 的rgb囉!\n\n if(bgr2rgb):\n in_img = in_img [:, :, ::-1] ### tf2 讀出來是 rgb, 但cv2存圖是bgr, 所以記得要轉一下ch\n rec_hope = rec_hope [:, :, ::-1] ### tf2 讀出來是 rgb, 但cv2存圖是bgr, 所以記得要轉一下ch\n flow_visual = flow_visual [:, :, 
::-1] ### tf2 讀出來是 rgb, 但cv2存圖是bgr, 所以記得要轉一下ch\n gt_flow_visual = gt_flow_visual[:, :, ::-1] ### tf2 讀出來是 rgb, 但cv2存圖是bgr, 所以記得要轉一下ch\n return in_img, flow, gt_flow, rec_hope, flow_visual, M_visual, Cx_visual, Cy_visual, gt_flow_visual, Mgt_visual, Cxgt_visual, Cygt_visual\n\ndef I_Generate_F_see(model_obj, phase, index, in_img, in_img_pre, gt_flow, _4, rec_hope, exp_obj=None, training=True, see_reset_init=True, postprocess=False, npz_save=False, add_loss=False, bgr2rgb=True):\n current_ep = exp_obj.current_ep\n current_time = exp_obj.current_time\n if (phase == \"train\"): used_sees = exp_obj.result_obj.sees\n elif(phase == \"test\"): used_sees = exp_obj.result_obj.tests\n private_write_dir = used_sees[index].see_write_dir ### 每個 see 都有自己的資料夾 存 in/gt 之類的 輔助檔案 ,先定出位置\n private_rec_write_dir = used_sees[index].rec_visual_write_dir ### 每個 see 都有自己的資料夾 存 in/gt 之類的 輔助檔案 ,先定出位置\n public_write_dir = \"/\".join(used_sees[index].see_write_dir.replace(\"\\\\\", \"/\").split(\"/\")[:-1]) ### private 的上一層資料夾\n '''\n bgr2rgb: tf2 讀出來是 rgb, 但 cv2 存圖是bgr, 所以此狀況記得要轉一下ch 把 bgr2rgb設True!\n '''\n in_img, flow, gt_flow, rec_hope, flow_visual, M_visual, Cx_visual, Cy_visual, gt_flow_visual, Mgt_visual, Cxgt_visual, Cygt_visual = I_Gen_F_basic_data(model_obj, in_img, in_img_pre, gt_flow, rec_hope, exp_obj=exp_obj, training=training, bgr2rgb=bgr2rgb)\n\n if(current_ep == 0 or see_reset_init): ### 第一次執行的時候,建立資料夾 和 寫一些 進去資料夾比較好看的東西\n Check_dir_exist_and_build(private_write_dir) ### 建立 see資料夾\n Check_dir_exist_and_build(private_rec_write_dir) ### 建立 see資料夾\n cv2.imwrite(private_write_dir + \"/\" + \"0a_u1a0-dis_img(in_img).jpg\", in_img) ### 寫一張 in圖進去,進去資料夾時比較好看,0a是為了保證自動排序會放在第一張\n\n if(npz_save is False): np.save (private_write_dir + \"/\" + \"0b_u1b1-gt_flow\", gt_flow) ### 寫一張 gt圖進去,進去資料夾時比較好看,0b是為了保證自動排序會放在第二張\n if(npz_save is True ): np.savez_compressed(private_write_dir + \"/\" + \"0b_u1b1-gt_flow\", gt_flow) ### 寫一張 gt圖進去,進去資料夾時比較好看,0b是為了保證自動排序會放在��二張\n cv2.imwrite(private_write_dir + \"/\" + \"0b_u1b2-gt_flow.jpg\", gt_flow_visual) ### 寫一張 gt圖進去,進去資料夾時比較好看,0b是為了保證自動排序會放在第二張\n cv2.imwrite(private_write_dir + \"/\" + \"0b_u1b3-gt_Cx.jpg\", Cxgt_visual) ### 寫一張 gt圖進去,進去資料夾時比較好看,0b是為了保證自動排序會放在第二張\n cv2.imwrite(private_write_dir + \"/\" + \"0b_u1b4-gt_Cy.jpg\", Cygt_visual) ### 寫一張 gt圖進去,進去資料夾時比較好看,0b是為了保證自動排序會放在第二張\n cv2.imwrite(private_write_dir + \"/\" + \"0c-rec_hope.jpg\", rec_hope) ### 寫一張 rec_hope圖進去,hope 我 rec可以做到這麼好ˊ口ˋ,0c是為了保證自動排序會放在第三張\n if(npz_save is False): np.save (private_write_dir + \"/\" + \"epoch_%04i_u1b1_flow\" % current_ep, flow) ### 我覺得不可以直接存npy,因為太大了!但最後為了省麻煩還是存了,相對就減少see的數量來讓總大小變小囉~\n if(npz_save is True ): np.savez_compressed(private_write_dir + \"/\" + \"epoch_%04i_u1b1_flow\" % current_ep, flow) ### 我覺得不可以直接存npy,因為太大了!但最後為了省麻煩還是存了,相對就減少see的數量來讓總大小變小囉~\n cv2.imwrite(private_write_dir + \"/\" + \"epoch_%04i_u1b2_flow.jpg\" % current_ep, flow_visual) ### 把 生成的 flow_visual 存進相對應的資料夾\n cv2.imwrite(private_write_dir + \"/\" + \"epoch_%04i_u1b3_Cx.jpg\" % current_ep, Cx_visual) ### 我覺得不可以直接存npy,因為太大了!但最後為了省麻煩還是存了,相對就減少see的數量來讓總大小變小囉~\n cv2.imwrite(private_write_dir + \"/\" + \"epoch_%04i_u1b4_Cy.jpg\" % current_ep, Cy_visual) ### 我覺得不可以直接存npy,因為太大了!但最後為了省麻煩還是存了,相對就減少see的數量來讓總大小變小囉~\n\n ### matplot_visual的部分,記得因為用 matplot 所以要 bgr轉rgb,但是因為有用matplot_visual_single_row_imgs,裡面會bgr轉rgb了,所以這裡不用轉囉!\n ### 這部分要記得做!在 train_step3 的 exp_obj.result_obj.Draw_loss_during_train(epoch, self.epochs) 才有畫布可以畫loss!\n ### 目前覺得好像也不大會去看matplot_visual,所以就先把這註解掉了\n # 
exp_obj.result_obj.sees[see_index].save_as_matplot_visual_during_train(current_ep, bgr2rgb=True)\n\n if(postprocess):\n current_see_name = used_sees[index].see_name.replace(\"/\", \"-\") ### 因為 test 會有多一層 \"test_db_name\"/test_001, 所以把 / 改成 - ,下面 Save_fig 才不會多一層資料夾\n bm, rec = check_flow_quality_then_I_w_F_to_R(dis_img=in_img, flow=flow)\n '''gt不能做bm_rec,因為 real_photo 沒有 C! 所以雖然用 test_blender可以跑, 但 test_real_photo 會卡住, 因為 C 全黑!'''\n # gt_bm, gt_rec = check_flow_quality_then_I_w_F_to_R(dis_img=in_img, flow=gt_flow)\n\n cv2.imwrite(private_rec_write_dir + \"/\" + \"rec_epoch=%04i.jpg\" % current_ep, rec)\n single_row_imgs = Matplot_single_row_imgs(\n imgs =[ in_img , flow_visual , rec, rec_hope], ### 把要顯示的每張圖包成list\n img_titles=[\"in_img\", \"pred_flow_v\", \"pred_rec\", \"rec_hope\"], ### 把每張圖要顯示的字包成list\n fig_title =\"%s, current_ep=%04i\" % (current_see_name, int(current_ep)), ### 圖上的大標題\n add_loss =add_loss,\n bgr2rgb =bgr2rgb) ### 這裡會轉第2次bgr2rgb, 剛好轉成plt 的 rgb\n single_row_imgs.Draw_img()\n single_row_imgs.Save_fig(dst_dir=public_write_dir, name=current_see_name) ### 這裡是轉第2次的bgr2rgb, 剛好轉成plt 的 rgb ### 如果沒有要接續畫loss,就可以存了喔!\n print(\"save to:\", exp_obj.result_obj.test_write_dir)\n", "sub_path": "step08_b_use_G_generate_I_to_F.py", "file_name": "step08_b_use_G_generate_I_to_F.py", "file_ext": "py", "file_size_in_byte": 9553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "step08_b_use_G_generate_0_util.Value_Range_Postprocess_to_01", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 33, "usage_type": "attribute"}, {"api_name": "step08_b_use_G_generate_0_util.F_01_or_C_01_method1_visual_op", "line_number": 36, "usage_type": "call"}, {"api_name": "step08_b_use_G_generate_0_util.F_01_or_C_01_method1_visual_op", "line_number": 37, "usage_type": "call"}, {"api_name": "kong_util.build_dataset_combine.Check_dir_exist_and_build", "line_number": 60, "usage_type": "call"}, {"api_name": "kong_util.build_dataset_combine.Check_dir_exist_and_build", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 74, "usage_type": "call"}, {"api_name": 
"flow_bm_util.check_flow_quality_then_I_w_F_to_R", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 87, "usage_type": "call"}, {"api_name": "kong_util.matplot_fig_ax_util.Matplot_single_row_imgs", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "331225692", "text": "\"\"\"friends_api URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.urls import include, path\nfrom rest_framework import routers\nfrom api import views\n\nrouter = routers.DefaultRouter()\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n path('', include(router.urls)),\n path('create_user/', views.create_user),\n path('list/', views.list),\n path('receive/', views.receive),\n path('create_friendship/', views.create_friendship),\n path('delete_friendship/', views.delete_friendship)\n]", "sub_path": "friends_api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1165, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "api.views.create_user", "line_number": 26, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "api.views.list", "line_number": 27, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "api.views.receive", "line_number": 28, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "api.views.create_friendship", "line_number": 29, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "api.views.delete_friendship", "line_number": 30, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "53540732", "text": "# -*- coding: utf-8 -*-\nimport json\nimport logging\nimport random\nfrom datetime import datetime\n\nfrom telegram import ParseMode\nfrom telegram import ReplyKeyboardMarkup\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,\n ConversationHandler, PicklePersistence)\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - 
%(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nCHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)\n\nreply_keyboard = [\n ['СТРАХ', 'ПЕЧАЛЬ'],\n ['ГНЕВ', 'РАДОСТЬ'],\n ['СТЫД', 'ВИНА']\n]\nmarkup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)\n\n\ndef facts_to_str(user_data):\n facts = list()\n\n for key, value in user_data.items():\n date = value[0].strftime(\"%Y-%m-%d %H:%M\")\n text = value[1]\n facts.append('{}: \\n{}\\n{}'.format(key, date, text))\n\n return \"\\n\".join(facts).join(['\\n', '\\n'])\n\n\ndef start(update, context):\n reply_text = \"Привет! Надеюсь, у тебя всё хорошо. \"\n if context.user_data:\n reply_text += \"Ты уже рассказал мне о своих эмоциях ({}). Что ты \" \\\n \"чувствуешь сейчас?\".format(\", \".join(context.user_data.keys()))\n else:\n reply_text += \"Как ты себя сейчас чувствуешь?\"\n update.message.reply_text(reply_text, reply_markup=markup)\n\n return CHOOSING\n\n\ndef regular_choice(update, context):\n text = update.message.text.upper()\n context.user_data['choice'] = text\n if context.user_data.get(text):\n reply_text = 'Ты уже испытывал {}, вот твое описание этого: \\n{}\\n' \\\n 'Расскажи, почему ты испытываешь это ' \\\n 'сейчас?'.format(text, context.user_data[text])\n print(context.user_data[text])\n else:\n reply_text = 'Ты испытываешь {}? Расскажи, почему?'.format(text)\n update.message.reply_text(reply_text)\n\n return TYPING_REPLY\n\n\ndef received_information(update, context):\n text = update.message.text\n category = context.user_data['choice']\n print(category)\n print(text)\n print(context.user_data)\n data = [datetime.now(), text]\n if context.user_data.get(category):\n context.user_data[category] += data\n else:\n context.user_data[category] = data\n print(context.user_data[category])\n del context.user_data['choice']\n with open(\"nice_words_data.json\", encoding=\"utf-8\") as nice_words_file:\n nice_words_data = json.load(nice_words_file)\n nice_words = nice_words_data[category]\n update.message.reply_text(\"Хорошо, я тебя услышал! {}\".format(random.choice(nice_words)),\n reply_markup=markup, parse_mode=ParseMode.HTML)\n\n update.message.reply_text(\"А как ты себя чувствуешь сейчас?\")\n\n return CHOOSING\n\n\ndef show_data(update, context):\n update.message.reply_text(\"Вот что ты мне уже рассказал:\"\n \"{}\".format(facts_to_str(context.user_data)))\n\n return CHOOSING\n\n\ndef show_help(update, context):\n with open(\"emotions_data.json\", encoding=\"utf-8\") as emotions_list_file:\n emotions = \"\"\n emotions_data = json.load(emotions_list_file)\n print(emotions_data)\n for x in emotions_data:\n emotions += \"\\n\" + x + \":\\n\"\n for y in emotions_data[x]:\n emotions += y + \"\\n\"\n update.message.reply_text(text=\"Тебе сложно определиться с тем, какие эмоции и чувства ты испытываешь? 
\"\n \"Держи список, который поможет тебе разобраться!\"\n \"{}\".format(emotions),\n parse_mode=ParseMode.HTML)\n\n\ndef stop(update, context):\n if 'choice' in context.user_data:\n del context.user_data['choice']\n\n update.message.reply_text(\"Вот что ты мне уже рассказал:\"\n \"{}\"\n \"До встреч!\".format(facts_to_str(context.user_data)))\n return ConversationHandler.END\n\n\ndef error(update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n\n\ndef main():\n with open(\"config.json\") as json_file:\n json_data = json.load(json_file)\n token = json_data[\"TOKEN\"]\n \n pp = PicklePersistence(filename='emotionbot_v3')\n updater = Updater(token, persistence=pp, use_context=True)\n\n dp = updater.dispatcher\n \n regex_str = \"|\".join([x for l in reply_keyboard for x in l])\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n\n states={\n CHOOSING: [MessageHandler(Filters.regex('^({})$'.format(regex_str)), regular_choice)],\n TYPING_CHOICE: [MessageHandler(Filters.text, regular_choice)],\n TYPING_REPLY: [MessageHandler(Filters.text, received_information)],\n },\n\n fallbacks=[MessageHandler(Filters.regex('^Done$'), stop)],\n name=\"my_conversation\",\n persistent=True\n )\n\n dp.add_handler(conv_handler)\n\n show_data_handler = CommandHandler('show_data', show_data)\n dp.add_handler(show_data_handler)\n\n help_handler = CommandHandler('help', show_help)\n dp.add_handler(help_handler)\n \n dp.add_error_handler(error)\n\n # Start the Bot\n updater.start_polling()\n\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "emotionbot_v3.py", "file_name": "emotionbot_v3.py", "file_ext": "py", "file_size_in_byte": 5795, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "telegram.ReplyKeyboardMarkup", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "name"}, {"api_name": "json.load", "line_number": 79, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 81, "usage_type": "call"}, {"api_name": "telegram.ParseMode.HTML", "line_number": 82, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 82, "usage_type": "name"}, {"api_name": "json.load", "line_number": 99, "usage_type": "call"}, {"api_name": "telegram.ParseMode.HTML", "line_number": 108, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 108, "usage_type": "name"}, {"api_name": "telegram.ext.ConversationHandler.END", "line_number": 118, "usage_type": "attribute"}, {"api_name": "telegram.ext.ConversationHandler", "line_number": 118, "usage_type": "name"}, {"api_name": "json.load", "line_number": 128, "usage_type": "call"}, {"api_name": "telegram.ext.PicklePersistence", "line_number": 131, "usage_type": "call"}, {"api_name": "telegram.ext.Updater", "line_number": 132, "usage_type": "call"}, {"api_name": "telegram.ext.ConversationHandler", "line_number": 137, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 138, "usage_type": "call"}, {"api_name": 
"telegram.ext.MessageHandler", "line_number": 141, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.regex", "line_number": 141, "usage_type": "call"}, {"api_name": "telegram.ext.Filters", "line_number": 141, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 142, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.text", "line_number": 142, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 142, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 143, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.text", "line_number": 143, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 143, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 146, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.regex", "line_number": 146, "usage_type": "call"}, {"api_name": "telegram.ext.Filters", "line_number": 146, "usage_type": "name"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 153, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "106838037", "text": "# mlflow utilization\nimport numpy as np\nimport scipy.sparse as sps\nimport argparse\nimport time\nfrom sklearn.model_selection import train_test_split\nimport copy\nimport os\nimport math\nimport sys\nimport matplotlib.pyplot as plt\nfrom util.graph_manager import GraphManager\nfrom util.runs_util import *\n\nimport mlflow\nfrom util.mlflow_util import *\n\n\nACQ_MODELS = ['vopt--gr', 'sopt--gr', 'db--rkhs', 'mc--gr', 'mc--log', 'mc--probitnorm', 'sopt--hf', 'vopt--hf',\n 'uncertainty--gr', 'uncertainty--log', 'uncertainty--probitnorm', 'rand--gr', 'rand--log', 'rand--probitnorm']\n\n\nGRAPH_PARAMS = {\n 'knn' :10,\n 'sigma' : 3.,\n 'normalized' : True,\n 'zp_k' : 5\n}\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description = 'Run Active Learning experiment on Binary dataset')\n parser.add_argument('--data-root', default='./data/binary/', dest='data_root', type=str, help='Location of data X with labels.')\n parser.add_argument('--num-eigs', default=50, dest='M', type=int, help='Number of eigenvalues for spectral truncation')\n parser.add_argument('--tau', default=0.005, type=float, help='value of diagonal perturbation and scaling of GBSSL models (minus HF)')\n parser.add_argument('--gamma', default=0.1, type=float, help='value of noise parameter to be shared across all GBSSL models (minus HF)')\n parser.add_argument('--delta', default=0.01, type=float, help='value of diagonal perturbation of unnormalized graph Laplacian for HF model.')\n parser.add_argument('--h', default=0.1, type=float, help='kernel width for RKHS model.')\n parser.add_argument('--B', default=5, type=int, help='batch size for AL iterations')\n parser.add_argument('--al-iters', default=100, dest='al_iters', type=int, help='number of active learning iterations to perform.')\n parser.add_argument('--candidate-method', default='rand', type=str, dest='cand', help='candidate set selection method name [\"rand\", \"full\"]')\n parser.add_argument('--candidate-percent', default=0.1, type=float, dest='cand_perc', help='if --candidate-method == \"rand\", then this is the percentage of unlabeled data to consider')\n parser.add_argument('--select-method', default='top', type=str, dest='select_method', help='how to select which points to query from the acquisition values. 
in [\"top\", \"prop\"]')\n parser.add_argument('--runs', default=5, type=int, help='Number of trials to run')\n parser.add_argument('--lab-start', default=2, dest='lab_start', type=int, help='Number of initially labeled points.')\n parser.add_argument('--metric', default='euclidean', type=str, help='metric name (\"euclidean\" or \"cosine\") for graph construction')\n parser.add_argument('--name', default='binary', dest='experiment_name', help='Name for this dataset/experiment run ')\n args = parser.parse_args()\n\n\n GRAPH_PARAMS['n_eigs'] = args.M\n GRAPH_PARAMS['metric'] = args.metric\n if args.metric == 'cosine': # HYPERSPECTRAL DATA\n GRAPH_PARAMS['sigma'] = None\n GRAPH_PARAMS['zp_k'] = None\n\n if not os.path.exists('tmp/'):\n os.makedirs('tmp/')\n\n # Load in the Dataset\n\n if not os.path.exists(args.data_root + 'X_labels.npz'):\n raise ValueError(\"Cannot find previously saved data at {}\".format(args.data_root + 'X_labels.npz'))\n\n print(\"Loading data at {}\".format(args.data_root + 'X_labels.npz'))\n data = np.load(args.data_root + 'X_labels.npz', allow_pickle=True)\n X, labels = data['X'], data['labels'].flatten()\n N = X.shape[0]\n\n labels[labels == 0] = -1\n\n\n # Load in or calculate eigenvectors, using mlflow IN Graph_manager\n gm = GraphManager()\n evals, evecs = gm.from_features(X, knn=GRAPH_PARAMS['knn'], sigma=GRAPH_PARAMS['sigma'],\n normalized=GRAPH_PARAMS['normalized'], n_eigs=GRAPH_PARAMS['n_eigs'],\n zp_k=GRAPH_PARAMS['zp_k'], metric=GRAPH_PARAMS['metric']) # runs mlflow logging in this function call\n\n # If we are doing a run with the HF model, we need the unnormalized graph Laplacian\n L = None\n if 'hf' in ''.join(ACQ_MODELS):\n prev_run = get_prev_run('GraphManager.from_features',\n GRAPH_PARAMS,\n tags={\"X\":str(X), \"N\":str(X.shape[0])},\n git_commit=None)\n\n url_data = urllib.parse.urlparse(os.path.join(prev_run.info.artifact_uri,\n 'W.npz'))\n path = urllib.parse.unquote(url_data.path)\n W = sps.load_npz(path)\n L = sps.csr_matrix(gm.compute_laplacian(W, normalized=False)) + args.delta**2. * sps.eye(N)\n\n\n # Run the experiments\n print(\"--------------- Parameters for the Run of Experiments -----------------------\")\n print(\"\\tacq_models = %s\" % str(ACQ_MODELS))\n print(\"\\tal_iters = %d, B = %d, M = %d\" % (args.al_iters, args.B, args.M))\n print(\"\\tcand=%s, select_method=%s\" % (args.cand, args.select_method))\n print(\"\\tnum_init_labeled = %d\" % (args.lab_start))\n print(\"\\ttau = %1.6f, gamma = %1.6f, delta = %1.6f, h = %1.6f\" % (args.tau, args.gamma, args.delta, args.h))\n print(\"\\tnumber of runs = {}\".format(args.runs))\n print(\"\\n\\n\")\n ans = input(\"Do you want to proceed with this test?? 
[y/n] \")\n    while ans not in ['y','n']:\n        ans = input(\"Sorry, please input either 'y' or 'n'\")\n    if ans == 'n':\n        print(\"Not running test, exiting...\")\n    else:\n\n        client = mlflow.tracking.MlflowClient()\n        #experiment_name = 'checker2'\n        mlflow.set_experiment(args.experiment_name)\n        experiment = client.get_experiment_by_name(args.experiment_name)\n\n        for i, seed in enumerate(j**2 + 3 for j in range(args.runs)):\n            print(\"=======================================\")\n            print(\"============= Run {}/{} ===============\".format(i+1, args.runs))\n            print(\"=======================================\")\n            np.random.seed(seed)\n            init_labeled, unlabeled = train_test_split(np.arange(N), train_size=args.lab_start, stratify=labels)#list(np.random.choice(range(N), 10, replace=False))\n            init_labeled, unlabeled = list(init_labeled), list(unlabeled)\n\n            params_shared = {\n                'init_labeled': init_labeled,\n                'run': i,\n                'al_iters' : args.al_iters,\n                'B' : args.B,\n                'cand' : args.cand,\n                'select' : args.select_method\n            }\n            query = 'attributes.status = \"FINISHED\"'\n            for key, val in params_shared.items():\n                query += ' and params.{} = \"{}\"'.format(key, val)\n\n\n            already_completed = [run.data.tags['mlflow.runName'] for run in client.search_runs([experiment.experiment_id], filter_string=query)]\n\n\n            if len(already_completed) > 0:\n                print(\"Run {} already completed:\".format(i))\n                for thing in sorted(already_completed, key= lambda x : x[0]):\n                    print(\"\\t\", thing)\n                print()\n\n            np.save('tmp/init_labeled', init_labeled)\n\n            for acq, model in (am.split('--') for am in ACQ_MODELS):\n                if model == 'hf':\n                    run_name = \"{}-{}-{:.2f}-{}\".format(acq, model, args.delta, i)\n                elif model == 'rkhs':\n                    run_name = \"{}-{}-{:.2}-{}\".format(acq, model, args.h, i)\n                else:\n                    run_name = \"{}-{}-{:.3f}-{:.3f}-{}-{}\".format(acq, model, args.tau, args.gamma, args.M, i)\n\n                if run_name not in already_completed:\n                    labeled = copy.deepcopy(init_labeled)\n                    with mlflow.start_run(run_name=run_name) as run:\n                        # run AL test\n                        mlflow.log_params(params_shared)\n                        mlflow.log_artifact('tmp/init_labeled.npy')\n\n                        if model not in ['hf', 'rkhs']:\n                            mlflow.log_params({\n                                'tau' : args.tau,\n                                'gamma' : args.gamma,\n                                'M' : args.M\n                            })\n                            run_binary(evals, evecs, args.tau, args.gamma, labels, labeled, args.al_iters, args.B,\n                                    modelname=model, acq=acq, cand=args.cand, select_method=args.select_method, verbose=False)\n                        else:\n                            if model == 'hf':\n                                mlflow.log_param('delta', args.delta)\n                            else:\n                                mlflow.log_param('h', args.h)\n                            run_rkhs_hf(labels, labeled, args.al_iters, args.B, h=args.h, delta=args.delta, X=X, L=L,\n                                    modelname=model, acq=acq, cand=args.cand, select_method=args.select_method, verbose=False)\n\n\n    # Clean up tmp file\n    print(\"Cleaning up files in ./tmp/\")\n    if os.path.exists('tmp/init_labeled.npy'):\n        os.remove('tmp/init_labeled.npy')\n    if os.path.exists('tmp/iter_stats.npz'):\n        os.remove('tmp/iter_stats.npz')\n", "sub_path": "bin_run.py", "file_name": "bin_run.py", "file_ext": "py", "file_size_in_byte": 9125, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, 
{"api_name": "numpy.load", "line_number": 67, "usage_type": "call"}, {"api_name": "util.graph_manager.GraphManager", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "scipy.sparse.load_npz", "line_number": 91, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 91, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 92, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 92, "usage_type": "name"}, {"api_name": "scipy.sparse.eye", "line_number": 92, "usage_type": "call"}, {"api_name": "mlflow.tracking.MlflowClient", "line_number": 111, "usage_type": "call"}, {"api_name": "mlflow.tracking", "line_number": 111, "usage_type": "attribute"}, {"api_name": "mlflow.set_experiment", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 120, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 146, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 157, "usage_type": "call"}, {"api_name": "mlflow.start_run", "line_number": 158, "usage_type": "call"}, {"api_name": "mlflow.log_params", "line_number": 160, "usage_type": "call"}, {"api_name": "mlflow.log_artifact", "line_number": 161, "usage_type": "call"}, {"api_name": "mlflow.log_params", "line_number": 164, "usage_type": "call"}, {"api_name": "mlflow.log_param", "line_number": 173, "usage_type": "call"}, {"api_name": "mlflow.log_param", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 185, "usage_type": "call"}]} +{"seq_id": "529399830", "text": "from django.shortcuts import render\n\nfrom .forms import SignUpForm \n\n# Create your views here.\ndef home(request):\n\ttitle = \"Welcome\"\n\t# if request.user.is_authenticated(): \n\t# \ttitle = \"Welcome %s\" % (request.user)\n\t# if request.method == \"POST\":\n\t# \tprint request.POST\n\t\n\tform= SignUpForm(request.POST or None)\n\tcontext = {\n\t\t\"title\": title, \n\t\t\"form\":form\n\t}\n\n\t\n\n\tif form.is_valid():\n\t\t#form.save() # save it automatically \n\t\tinstance= form.save(commit=False) # don't save data \n\n\t\tfull_name= form.cleaned_data.get(\"full_name\")\n\t\tif not full_name:\n\t\t\tfull_name=\"New full_name\"\n\t\tinstance.full_name=full_name\n\n\n\n\t\tinstance.save()\n\n\t\t# print instance.email # in terminal\n\n\t\tcontext={\n\t\t\t'title': \"Thank you\"\n\t\t}\n\n\n\treturn render(request, \"home.html\", context)", "sub_path": "src/newsletter/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 761, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "forms.SignUpForm", "line_number": 13, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": 
"call"}]} +{"seq_id": "403572837", "text": "# -*- coding: UTF-8 -*\n# File:: Udacity_P1 - p1_residual_analysis.py\n# Description::\n# \n# | © Copyright 2015 Llama Logic | #\n__copyright__ = '© 2015 Jared Champion'\n\n__author__ = 'Jared Champion'\n__buildCode__ = 'Udacity_P1.p1_residual_analysis.JC.2015.08.13.12'\n\n\n#import required libraries\nfrom scipy import stats\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\n\n\n'''\nThis function loads the data for analysis\n@since 2015.08.24\n\n@param csv_f_path :: string :: path to csv file\n'''\ndef load_csv(csv_f_path):\n f = open(csv_f_path)\n csv_data = pd.read_csv(f)\n\n return csv_data\n\n'''\nThis function performs the linear regression model on the data. It is used by predictions()\n\n@since 2015.08.13\n\n@param features :: array model | features :: These are the features used by the linear regression model\nNo default.\n@param values :: pandas dataframe | values :: These are the data values in pandas dataframe format\nNo Default.\n'''\ndef linear_regression(features, values):\n\n #Create an intercept from our features\n intercept = sm.add_constant(features)\n\n\n #Create model from Intercept and Values\n #OLS is our linear regression model (Ordinary Least Squares)\n #more info: http://statsmodels.sourceforge.net/devel/generated/statsmodels.regression.linear_model.OLS.html\n model = sm.OLS(values, intercept)\n\n\n #fit the results to our model\n results = model.fit()\n\n\n #store the results\n params = results.params\n\n #slice added intercept; store\n intercept = results.params[0]\n\n #slice params; store\n params = results.params[1:]\n\n return intercept, params\n\n\n\n\n'''\nThis function performs the predictions on the data\nThis function is based on the Udacity Intro to Data Science Problem Set 3.5 Code\n\n@since 2015.08.13\n\n@param dataframe :: pandas dataframe | dataset :: This is the dataframe of the NYC Data\n\n@return: predictions :: array (test for type)\n'''\ndef predictions(dataframe):\n\n '''\n The below model was used in my P1 Submission, and will be left untouched.\n\n This re-implementation is to test the residuals as an assessment of the fit of linear regression on the data.\n '''\n\n features = dataframe[['rain', 'precipi', 'Hour', 'mintempi', 'maxtempi', 'meanwindspdi', 'meandewpti']]\n dummy_units = pd.get_dummies(dataframe['UNIT'], prefix='unit')\n features = features.join(dummy_units)\n\n # Values\n values = dataframe['ENTRIESn_hourly']\n\n # Perform linear regression\n intercept, params = linear_regression(features, values)\n\n predictions = intercept + np.dot(features, params)\n\n return predictions, values\n\n\n\n\ndef residual_analysis_cyclic(predictedData, actualData):\n '''\n This function displays a representation of the fit of the linear regression model on the data\n\n @since 2015.08.13\n @param predictedData: pandas dataframe :: This is the dataframe of the predicted data from predictions()\n @param actualData: pandas dataframe :: This is the dataframe of the actual data to compare to the predictions\n\n :return: Returns True at end of process\n '''\n plt.plot(actualData - predictedData)\n plt.show()\n\n return True\n\n\ndef residual_analysis_QQ( predictedData ):\n\n x = predictedData #array of SD?\n\n res = stats.probplot(x, plot=plt)\n\n #Show Q-Q Plot\n plt.show()\n\n\ninputData = load_csv('p1-data-v2.csv')\n\npredictedData, trueData = predictions(inputData)\n\n\n#residual_analysis_cyclic(predictedData, 
trueData)\nresidual_analysis_QQ(predictedData)\n", "sub_path": "p1_residual_analysis.py", "file_name": "p1_residual_analysis.py", "file_ext": "py", "file_size_in_byte": 3516, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "statsmodels.api.add_constant", "line_number": 45, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 45, "usage_type": "name"}, {"api_name": "statsmodels.api.OLS", "line_number": 51, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 51, "usage_type": "name"}, {"api_name": "pandas.get_dummies", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "scipy.stats.probplot", "line_number": 127, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}]} +{"seq_id": "244285836", "text": "from neo4j import GraphDatabase\nfrom pygraph.classes.digraph import digraph\nfrom collections import defaultdict\nimport re\ncoreference_map = defaultdict(list)\n\ncoreference_head = {}\n\ncoreference_id_list = []\n\n\ndef _create_node(tx, uid, label, coreference):\n tx.run(\"CREATE (e:Node {label: $label, uid: $uid, coreference: $coreference}) RETURN e\", label=label, uid=uid, coreference=coreference)\n \ndef _create_edge(tx, src, dst, label):\n tx.run(\"MATCH (src:Node), (dst:Node) WHERE src.uid = $src AND dst.uid = $dst MERGE (src)-[r:edge {label: $label}]->(dst)\", src=src, dst=dst, label=label)\n \ndef _delete_all(tx):\n tx.run(\"MATCH (n) DETACH DELETE n\")\n\ndef create_node(driver, uid, label, coreference=None):\n with driver.session() as session:\n session.write_transaction(_create_node, uid, label, coreference)\n \ndef create_edge(driver, src, dst, label):\n with driver.session() as session:\n session.write_transaction(_create_edge, src, dst, label)\n \ndef delete_all(driver):\n with driver.session() as session:\n session.write_transaction(_delete_all)\n\ndef _is_head_node(node):\n if node.pos() == \"NN\"or node.pos() == \"NE\":\n return True\n else:\n return False\n\ndef _add_coreference_heads_for_graph(graph, coreference_heads):\n coreference_nodes = []\n coref_ids = []\n\n for node in graph:\n if node.coreference:\n coreference_nodes.append(node)\n for node in coreference_nodes:\n coref_id = int(re.findall(r'\\d+', node.coreference)[0])\n \n if coref_id not in coref_ids:\n if _is_head_node(node):\n coref_ids.append(coref_id)\n coreference_heads[coref_id] = node.uid\n \ndef _is_part_of_initial_coreference(nodes, uid, coreference_map):\n current_node = nodes[uid]\n if current_node.coreference:\n coref_id = int(re.findall(r'\\d+', current_node.coreference)[0])\n\n if coref_id not in coreference_map:\n coreference_map[coref_id].append(current_node.coreference)\n return True\n else:\n is_finished = False\n for coref in coreference_map[coref_id]:\n if 
coref[-1] == \")\":\n is_finished = True\n if not is_finished:\n if current_node.coreference:\n coreference_map[coref_id].append(current_node.coreference)\n return True\n else:\n return False\n\ndef merge_graphs_and_write_to_neo4j(graphs, username, password):\n uri = \"bolt://localhost:7687\"\n driver = GraphDatabase.driver(uri, auth=(username, password))\n delete_all(driver)\n coreference_heads = {}\n added_coreference_nodes = []\n coreference_map = defaultdict(list)\n id_count = 0\n for graph, tree in graphs:\n nodes = graph.nodesMap\n _add_coreference_heads_for_graph(graph, coreference_heads)\n\n for uid in nodes:\n node = nodes[uid]\n # NODE\n label = str(node.text[0])\n \n if node.coreference and _is_part_of_initial_coreference(nodes, uid, coreference_map):\n create_node(driver, uid, label, node.coreference) \n added_coreference_nodes.append(uid)\n elif not node.coreference:\n create_node(driver, uid, label, node.coreference)\n \n # EDGE\n for (src, dst) in digraph.edges(graph):\n label = str(graph.edge_label((src, dst)))\n if label:\n coref_id = None\n if nodes[src].coreference and dst not in added_coreference_nodes:\n coref_id = int(re.findall(r'\\d+', nodes[src].coreference)[0])\n create_edge(driver, coreference_heads.get(coref_id, src), dst, label) \n elif nodes[dst].coreference and src not in added_coreference_nodes:\n coref_id = int(re.findall(r'\\d+', nodes[dst].coreference)[0])\n create_edge(driver, src, coreference_heads.get(coref_id, dst), label) \n else:\n create_edge(driver, src, dst, label)", "sub_path": "neo4j_con/neo4j_con.py", "file_name": "neo4j_con.py", "file_ext": "py", "file_size_in_byte": 4138, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "collections.defaultdict", "line_number": 5, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 47, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 57, "usage_type": "call"}, {"api_name": "neo4j.GraphDatabase.driver", "line_number": 76, "usage_type": "call"}, {"api_name": "neo4j.GraphDatabase", "line_number": 76, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 80, "usage_type": "call"}, {"api_name": "pygraph.classes.digraph.digraph.edges", "line_number": 98, "usage_type": "call"}, {"api_name": "pygraph.classes.digraph.digraph", "line_number": 98, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 103, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "179805459", "text": "import json\nfrom django.core.management.base import BaseCommand\nfrom ... 
import models\n\nclass Command(BaseCommand):\n\n    help = "Register a new collection"\n\n    def add_arguments(self, parser):\n        parser.add_argument('name')\n        parser.add_argument('--index')\n        parser.add_argument('--loader')\n        parser.add_argument('--options')\n\n    def handle(self, name, index, loader, options, **kwargs):\n        json.loads(options or '{}') # make sure it's valid json\n        models.Collection.objects.create(\n            title=name.title(),\n            name=name,\n            index=index or name,\n            loader=loader or 'hoover.search.loaders.upload.Loader',\n            options=options or '{}',\n        )\n", "sub_path": "hoover/search/management/commands/createcollection.py", "file_name": "createcollection.py", "file_ext": "py", "file_size_in_byte": 723, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 5, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "329380609", "text": "from contextlib import suppress\n\nimport scrapy\n\nfrom ..utils import unify_title\n\n\nclass FoundicoBaseSpider(scrapy.Spider):\n    name = 'foundico'\n    start_urls = [\n        'https://foundico.com/icos/'\n    ]\n\n    def parse(self, response):\n        company_pages = response.xpath(\n            '//table[contains(@id, \"mn-icos-cont\")]'\n            '/tbody/tr/td/child::div[1]/a/@href').extract()\n        for company_page in company_pages:\n            yield response.follow(company_page, callback=self.parse_company_page)\n\n        next_page = response.xpath('//i[.=\"chevron_right\"]/parent::a/@href').extract_first()\n        if next_page:\n            yield response.follow(next_page, callback=self.parse)\n        else:\n            return 'Done'\n\n    @staticmethod\n    def parse_company_page(response):\n        yield {}\n\n\nclass FoundicoSpider(FoundicoBaseSpider):\n    name = 'foundico'\n\n    @staticmethod\n    def parse_company_page(response):\n        token_price = response.xpath(\n            '//tr[./td[contains(., \"Token price\")]]/child::td[3]/text()').extract_first()\n        start_time = response.xpath(\n            '//div[@id=\"ico-start\"]/span[@class=\"ico-c-month\"]/text()').re(regex=r'\\w+\\s\\w+')\n        end_time = response.xpath(\n            '//div[@id=\"ico-end\"]/span[@class=\"ico-c-month\"]/text()').re(regex=r'\\w+\\s\\w+')\n\n        data = {\n            'title': unify_title(response.xpath('//h1/text()').extract_first()),\n            'type': response.xpath(\n                '//tr[./td[contains(., \"Type\")]]/child::td[3]/text()').extract_first(),\n            'category': response.xpath(\n                '//tr[./td[contains(., \"Category\")]]/child::td[3]/a/@href').extract_first(),\n            'verified team': response.xpath(\n                '//tr[./td[contains(., \"Verified team\")]]/child::td[3]/text()').extract_first(),\n            'whitelist_of_investors': response.xpath(\n                '//tr[./td[contains(., \"Whitelist of investors\")]]/child::td[3]/text()').extract_first(),\n            'kyc_of_investors': response.xpath(\n                '//tr[./td[contains(., \"KYC of investors\")]]/child::td[3]/text()').extract_first(),\n            'goal_of_funding': response.xpath(\n                '//tr[./td[contains(., \"Goal of funding\")]]/child::td[3]/text()').extract_first(),\n            'tokens_for_sale': response.xpath(\n                '//tr[./td[contains(., \"Tokens for sale\")]]/child::td[3]/text()').extract_first(),\n            'token_price': token_price.replace('\\t', '') if token_price else None,\n            'minimum_purchase': response.xpath(\n                '//tr[./td[contains(., \"Minimum purchase\")]]/child::td[3]/text()').extract_first(),\n            'airdrop_program': response.xpath(\n                '//tr[./td[contains(., \"Airdrop program\")]]/child::td[3]/text()').extract_first(),\n            'bounty_program': response.xpath(\n                '//tr[./td[contains(., \"Bounty 
program\")]]/child::td[3]/text()').extract_first(),\n 'have_escrow_agent': response.xpath(\n '//tr[./td[contains(., \"Have escrow agent\")]]/child::td[3]/text()').extract_first(),\n 'have_working_prototype': response.xpath(\n '//tr[./td[contains(., \"Have working prototype\")]]/child::td[3]/text()').extract_first(),\n 'white_paper': response.xpath(\n '//tr[./td[contains(., \"White paper\")]]/child::td[3]/a/@href').extract_first(),\n 'currencies': response.xpath(\n '//tr[./td[contains(., \"Currencies\")]]/child::td[3]/text()').re(regex=r'\\w+'),\n 'exchange_markets': response.xpath(\n '//tr[./td[contains(., \"Exchange markets\")]]/child::td[3]/a/@href').extract(),\n 'location': response.xpath(\n '//tr[./td[contains(., \"Location\")]]/child::td[3]/text()').extract_first(),\n 'website': response.xpath(\n '//tr[./td[contains(., \"Website\")]]/child::td[3]/a/text()').extract_first(),\n 'start_time': start_time[0]\n + ', ' + response.xpath('//div[@id=\"ico-start\"]/span[@class=\"ico-c-year\"]/text()').re(regex=r'\\d+')[0]\n if start_time else None,\n 'end_time': end_time[0]\n + ', ' + response.xpath('//div[@id=\"ico-end\"]/span[@class=\"ico-c-year\"]/text()').re(regex=r'\\d+')[0]\n if end_time else None\n }\n\n links = response.xpath(\n '//tr[./td[contains(., \"Links\")]]/child::td[3]/a/@href').extract(),\n\n if len(links) > 0:\n for link in links[0]:\n with suppress(IndexError):\n data[link.split('https://')[1].split('/')[0]] = link\n yield data\n", "sub_path": "crypto/crypto/spiders/foundico.py", "file_name": "foundico.py", "file_ext": "py", "file_size_in_byte": 4667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "scrapy.Spider", "line_number": 8, "usage_type": "attribute"}, {"api_name": "utils.unify_title", "line_number": 45, "usage_type": "call"}, {"api_name": "contextlib.suppress", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "390545462", "text": "import pandas as pd\nimport json\nfrom collections import Counter\nfrom itertools import combinations\nimport numpy as np\n\n\ndef read_json(path):\n with open(path, 'r') as f:\n output = json.load(f)\n return output\n\n\nPATH_TEST = './data/test_users.json'\nPATH_TRANSACTIONS = './data/transactions.csv'\nPATH_RATINGS = './data/ratings.csv'\nPATH_BOOKMARKS = './data/bookmarks.csv'\nPATH_CATALOGUE = './data/catalogue.json'\nPATH_PLOTS = './plots'\n\ntransactions = pd.read_csv(PATH_TRANSACTIONS)\nratings = pd.read_csv(PATH_RATINGS)\nbookmarks = pd.read_csv(PATH_BOOKMARKS)\ncatalogue = pd.DataFrame.from_dict(read_json(PATH_CATALOGUE), orient='index')\n# catalogue = pd.read_csv('./data/catalogue.csv')\ntest_users = read_json(PATH_TEST)['users']\n\ncoocurancy = pd.concat([transactions[['user_uid', 'element_uid']], bookmarks[['user_uid', 'element_uid']],\n ratings[ratings['rating'] > 5][['user_uid', 'element_uid']]])\ncoocurancy = [x for x in coocurancy.groupby('user_uid')['element_uid'].apply(lambda x: x.tolist()).to_dict().values()]\n\ncoocurancy = [set(x) for x in coocurancy]\n\ncnt = Counter()\nfor x in coocurancy:\n for el in x:\n cnt[el] += 1\n\ncnt_xy = Counter()\nfor b in coocurancy:\n for xy in combinations(b, 2):\n cnt_xy[xy] += 1\n\n\nfor k, v in cnt.items():\n cnt[k] = np.log(v / len(cnt))\n\nfor k, v in cnt_xy.items():\n cnt_xy[k] = np.log(v / len(cnt_xy))\n\n", "sub_path": "source/pmi.py", "file_name": "pmi.py", "file_ext": "py", "file_size_in_byte": 1401, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": 
"55", "api": [{"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 28, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 34, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 39, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "224924462", "text": "import urllib.request, urllib.parse, urllib.error\nimport xml.etree.ElementTree as ET\n\n# extract all the comment/count values from the url and get the sum of all of them\nurl = 'http://python-data.dr-chuck.net/comments_217218.xml'\n\n# read in the content of the url as a string\ndata = urllib.request.urlopen(url).read() # data is a string of the url contents\n\n# I think this only works because the url in question is *already* xml content\n# transform the string content into a xml tree\ntree = ET.fromstring(data) # the format of the string is xml....\n\n# find all count elements\ncounts = tree.findall('comments/comment/count')\n\n# if you look at the actual xml, you will see the nesting used above\n\n# extract the value of each count element and add it to the total\ntotal = 0\nfor count in counts:\n total += int(count.text)\n\nprint('total: ', total)\n\n", "sub_path": "clear_xml_13.py", "file_name": "clear_xml_13.py", "file_ext": "py", "file_size_in_byte": 848, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 8, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 8, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 8, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 12, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "345483067", "text": "#!/usr/bin/env python3\n\n\nimport asyncio\nimport time\nimport json\nimport uuid\nimport datetime\nimport sys\nimport collections\nimport copy\n\n\n\nfrom labware_driver import LabwareDriver\n\nfrom autobahn.asyncio import wamp, websocket\nfrom autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner \n\n\n\nclass WampComponent(wamp.ApplicationSession):\n \"\"\"WAMP application session for OTOne (Overrides protocol.ApplicationSession - WAMP endpoint session)\n \"\"\"\n\n def onConnect(self):\n \"\"\"Callback fired when the transport this session will run over has been established.\n \"\"\"\n self.join(u\"ot_realm\")\n\n\n @asyncio.coroutine\n def onJoin(self, details):\n \"\"\"Callback fired when WAMP session has been established.\n\n May return a Deferred/Future.\n\n Starts instatiation of robot objects by calling :meth:`otone_client.instantiate_objects`.\n \"\"\"\n print(datetime.datetime.now(),' - labware_client : WampComponent.onJoin:')\n print('\\tdetails: ',str(details))\n if not self.factory._myAppSession:\n 
self.factory._myAppSession = self\n try:\n self.factory._crossbar_connected = True\n except AttributeError:\n print('ERROR: factory does not have \"crossbar_connected\" attribute')\n\n\n def handshake(client_data):\n \"\"\" FACTORY STUB\n \"\"\"\n print(datetime.datetime.now(),' - labware_client : WampComponent.handshake:')\n print('\\n\\targs: ',locals(),'\\n')\n try:\n self.factory._handshake(client_data)\n except AttributeError:\n print('ERROR: factory does not have \"_handshake\" attribute')\n\n\n def dispatch_message(client_data):\n \"\"\" FACTORY STUB\n \"\"\"\n print(datetime.datetime.now(),' - labware_client : WampComponent.dispatch_message:')\n print('\\n\\targs: ',locals(),'\\n')\n try:\n self.factory._dispatch_message(client_data)\n except AttributeError:\n print('ERROR: factory does not have \"_dispatch_message\" attribute')\n\n\n yield from self.subscribe(handshake, 'com.opentrons.labware_handshake')\n yield from self.subscribe(dispatch_message, 'com.opentrons.labware')\n\n\n def onLeave(self, details):\n \"\"\"Callback fired when WAMP session has been closed.\n :param details: Close information.\n \"\"\"\n print('driver_client : WampComponent.onLeave:')\n print('\\n\\targs: ',locals(),'\\n')\n if self.factory._myAppSession == self:\n self.factory._myAppSession = None\n try:\n self.disconnect()\n except:\n raise\n \n\n def onDisconnect(self):\n \"\"\"Callback fired when underlying transport has been closed.\n \"\"\"\n print(datetime.datetime.now(),' - labware_client : WampComponent.onDisconnect:')\n asyncio.get_event_loop().stop()\n try:\n self.factory._crossbar_connected = False\n except AttributeError:\n print('ERROR: outer does not have \"crossbar_connected\" attribute')\n\n\nclass LabwareClient():\n\n def __init__(self):\n print(datetime.datetime.now(),' - LabwareClient.__init__:')\n print('\\n\\targs: ',locals(),'\\n')\n self.driver_dict = {}\n self.meta_dict = {\n 'drivers' : lambda from_,session_id,name,param: self.drivers(from_,session_id,name,param),\n 'add_driver' : lambda from_,session_id,name,param: self.add_driver(from_,session_id,name,param),\n 'remove_driver' : lambda from_,session_id,name,param: self.remove_driver(from_,session_id,name,param),\n 'callbacks' : lambda from_,session_id,name,param: self.callbacks(from_,session_id,name,param),\n 'meta_callbacks' : lambda from_,session_id,name,param: self.meta_callbacks(from_,session_id,name,param),\n 'set_meta_callback' : lambda from_,session_id,name,param: self.set_meta_callback(from_,session_id,name,param),\n 'add_callback' : lambda from_,session_id,name,param: self.add_callback(from_,session_id,name,param),\n 'remove_callback' : lambda from_,session_id,name,param: self.remove_callback(from_,session_id,name,param),\n 'flow' : lambda from_,session_id,name,param: self.flow(from_,session_id,name,param),\n 'clear_queue' : lambda from_,session_id,name,param: self.clear_queue(from_,session_id,name,param),\n 'connect' : lambda from_,session_id,name,param: self.driver_connect(from_,session_id,name,param),\n 'close' : lambda from_,session_id,name,param: self.driver_close(from_,session_id,name,param),\n 'meta_commands' : lambda from_,session_id,name,param: self.meta_commands(from_,session_id,name,param)\n }\n\n self.in_dispatcher = {\n 'command': lambda from_,session_id,data: self.send_command(from_,session_id,data),\n 'meta': lambda from_,session_id,data: self.meta_command(from_,session_id,data)\n }\n\n self.topic = {\n 'frontend' : 'com.opentrons.frontend',\n 'driver' : 'com.opentrons.driver',\n 'labware' : 
'com.opentrons.labware',\n 'bootstrapper' : 'com.opentrons.bootstrapper'\n }\n\n self.clients = {\n # uuid : 'com.opentrons.[uuid]'\n }\n self.max_clients = 4\n\n self.id = str(uuid.uuid4())\n\n self.session_factory = wamp.ApplicationSessionFactory()\n self.session_factory.session = WampComponent\n self.session_factory._myAppSession = None\n self.session_factory._crossbar_connected = False\n self.transport_factory = None\n\n self.transport = None\n self.protocol = None\n\n self.loop = asyncio.get_event_loop()\n\n\n # FUNCTIONS FROM SUBSCRIBER\n def dispatch_message(self, message):\n print(datetime.datetime.now(),' - LabwareClient.dispatch_message:')\n print('\\n\\targs: ',locals(),'\\n')\n try:\n dictum = collections.OrderedDict(json.loads(message.strip(), object_pairs_hook=collections.OrderedDict))\n if 'type' in dictum and 'from' in dictum and 'sessionID' in dictum and 'data' in dictum:\n if dictum['type'] in self.in_dispatcher:\n # if self.client_check(dictum['from']):\n # opportunity to filter, not actually used\n self.in_dispatcher[dictum['type']](dictum['from'],dictum['sessionID'],dictum['data'])\n else:\n print(datetime.datetime.now(),' - ERROR:\\n\\r',sys.exc_info())\n print('type: ',dictum['type'])\n else:\n print(datetime.datetime.now(),' - ERROR:\\n\\r',sys.exc_info())\n except:\n print(datetime.datetime.now(),' - ERROR:\\n\\r',sys.exc_info())\n\n\n # FUNCTIONS FROM PUBLISHER\n def handshake(self, data):\n print(datetime.datetime.now(),' - LabwareClient.handshake:')\n print('\\n\\targs: ',locals(),'\\n')\n \n data_dict = json.loads(data)\n if isinstance(data_dict, dict):\n if 'from' in data:\n print('* data has \"from\"')\n client_id = data_dict['from']\n print('client_id: ',client_id)\n if client_id in self.clients:\n print('* from is a client')\n if 'data' in data_dict:\n if 'message' in data_dict['data']:\n if 'extend' in data_dict['data']['message']:\n print('handshake called again on client ',client_id,'. 
We could have done something here to repopulate data')\n                                self.publish( client_id , client_id , client_id ,'handshake','labware','result','already_connected')\n                            if 'shake' in data_dict['data']['message']:\n                                self.publish_client_ids(client_id,client_id)\n                else:\n                    print('* from is NOT a client')\n                    if len(self.clients) > self.max_clients:\n                        self.publish( 'frontend', '' , '' , 'handshake' , 'labware' , 'result' , 'fail' )\n                    else:\n                        if client_id != \"\":\n                            self.clients[client_id] = 'com.opentrons.'+client_id\n                            self.publish( 'frontend' , client_id , client_id , 'handshake', 'labware', 'result','success')\n                        else:\n                            self.gen_client_id()\n            else:\n                print('* data does NOT have \"from\"')\n                self.gen_client_id()\n        \n        if 'get_ids' in data_dict:\n            self.publish_client_ids('','')\n        else:\n            self.gen_client_id()\n\n\n    def gen_client_id(self):\n        print(datetime.datetime.now(),' - LabwareClient.gen_client_id:')\n        print('\\n\\targs: ',locals(),'\\n')\n        ret_id = ''\n        if len(self.clients) > self.max_clients:\n            self.publish( 'frontend', '' , '' , 'handshake' , 'labware' , 'result' , 'fail' )\n        else:\n            client_id = str(uuid.uuid4())\n            self.clients[client_id] = 'com.opentrons.'+client_id\n            self.publish( 'frontend' , client_id , client_id , 'handshake' , 'labware' , 'result' , 'success' )\n            ret_id = client_id\n        return ret_id\n\n\n    def client_check(self, id_):\n        print(datetime.datetime.now(),' - LabwareClient.client_check:')\n        print('\\n\\targs: ',locals(),'\\n')\n        if id_ in self.clients:\n            return True\n        else:\n            return False\n\n\n    def publish_client_ids(self, id_, session_id):\n        print(datetime.datetime.now(),' - LabwareClient.publish_client_ids:')\n        print('\\n\\targs: ',locals(),'\\n')\n        if id_ in self.clients:\n            self.publish( id_ , id_ , session_id, 'handshake' , 'labware' , 'ids' , list(self.clients) )\n        else:\n            self.publish( 'frontend' , '' , session_id, 'handshake' , 'labware' , 'ids' , list(self.clients) )\n        return list(self.clients)\n\n\n    def publish(self,topic,to,session_id,type_,name,message,param):\n        \"\"\"\n        \"\"\"\n        print(datetime.datetime.now(),' - LabwareClient.publish:')\n        print('\\n\\targs: ',locals(),'\\n')\n        if self.session_factory is not None and topic is not None and type_ is not None:\n            if name is None:\n                name = 'None'\n            if message is None:\n                message = ''\n            if param is None:\n                param = ''\n            if self.session_factory is not None:\n                if self.session_factory._myAppSession is not None:\n                    time_string = str(datetime.datetime.now())\n                    msg = {'time':time_string,'type':type_,'to':to,'sessionID':session_id,'from':self.id,'data':{'name':name,'message':{message:param}}}\n                    try:\n                        if topic in self.topic:\n                            print('TOPIC: ',self.topic)\n                            print(datetime.datetime.now(),'url topic: ',self.topic.get(topic))\n                            self.session_factory._myAppSession.publish(self.topic.get(topic),json.dumps(msg))\n                        else:\n                            print('TO:',to)\n                            url_topic = 'com.opentrons.'+to\n                            print(datetime.datetime.now(),'url topic: ',url_topic)\n                            self.session_factory._myAppSession.publish(url_topic,json.dumps(msg))\n                    except:\n                        print(datetime.datetime.now(),' - publisher.py - publish - error:\\n\\r',sys.exc_info())\n                else:\n                    print(datetime.datetime.now(),' - publisher.py - publish - error: caller._myAppSession is None')\n        else:\n            print(datetime.datetime.now(),' - publisher.py - publish - error: caller, topic, or type_ is None')\n\n\n    # FUNCTIONS FROM HARNESS\n    def drivers(self, from_, session_id, name, param):\n        \"\"\"\n        name: n/a\n        param: n/a\n        \"\"\"\n        print(datetime.datetime.now(),' - LabwareClient.drivers:')\n        print('\\n\\targs: ',locals(),'\\n')\n        return_list = 
list(self.driver_dict)\n if name is None:\n name = 'None'\n if from_ == \"\":\n self.publish('frontend',from_,session_id,'labware',name,'drivers',return_list)\n else:\n self.publish(from_,from_,session_id,'labware',name,'drivers',return_list)\n return return_list\n\n\n def add_driver(self, from_, session_id, name, param):\n \"\"\"\n name: name of driver to add_driver\n param: driver object\n \"\"\"\n print(datetime.datetime.now(),' - LabwareClient.add_driver:')\n print('\\n\\targs: ',locals(),'\\n')\n self.driver_dict[name] = param\n return_list = list(self.driver_dict)\n if from_ == \"\":\n self.publish('frontend',from_,session_id,'labware',name,'drivers',return_list)\n else:\n self.publish(from_,from_,session_id,'labware',name,'drivers',return_list)\n return return_list\n\n\n def remove_driver(self, from_, session_id, name, param):\n \"\"\"\n name: name of driver to be driver\n param: n/a\n \"\"\"\n print(datetime.datetime.now(),' - LabwareClient.remove_driver:')\n print('\\n\\targs: ',locals(),'\\n')\n del self.driver_dict[name]\n return_list = list(self.driver_dict)\n if from_ == \"\":\n self.publish('frontend',from_,session_id,'labware',name,'drivers',return_list)\n else:\n self.publish(from_,from_,session_id,'labware',name,'drivers',return_list)\n return return_list\n\n\n def callbacks(self, from_, session_id, name, param):\n \"\"\"\n name: name of driver\n param: n/a\n \"\"\"\n print(datetime.datetime.now(),' - LabwareClient.callbacks:')\n print('\\n\\targs: ',locals(),'\\n')\n return_dict = self.driver_dict[name].callbacks()\n if from_ == \"\":\n self.publish('frontend',from_,session_id,'labware',name,'callbacks',return_dict)\n else:\n self.publish(from_,from_,session_id,'labware',name,'callbacks',return_dict)\n return return_dict\n\n\n def meta_callbacks(self, from_, session_id, name, param):\n \"\"\"\n name: name of driver\n param: n/a\n \"\"\"\n print(datetime.datetime.now(),' - labware_harness.meta_callbacks:')\n print('\\n\\targs: ',locals(),'\\n')\n return_dict = self.driver_dict[name].meta_callbacks()\n self.publish(from_,from_,session_id,'labware',name,'meta_callbacks',return_dict)\n return return_dict\n\n\n def set_meta_callback(self, from_, session_id, name, param):\n \"\"\"\n name: name of driver\n param: { meta-callback-name : meta-callback-object }\n \"\"\"\n print(datetime.datetime.now(),' - LabwareClient.set_meta_callback:')\n print('\\n\\targs: ',locals(),'\\n')\n if isinstance(param,dict):\n return_dict = self.driver_dict.get(name).set_meta_callback(list(param)[0],list(param.values())[0])\n else:\n return_dict = self.driver_dict.get(name).meta_callbacks()\n self.publish(from_,from_,session_id,'labware',name,'meta_callback',return_dict)\n return return_dict\n\n\n def add_callback(self, from_, session_id, name, param):\n \"\"\"\n name: name of driver\n param: { callback obj: [messages list] }\n \"\"\"\n print(datetime.datetime.now(),' - LabwareClient.add_callback:')\n print('\\n\\targs: ',locals(),'\\n')\n return_dict = self.driver_dict[name].add_callback(list(param)[0],list(param.values())[0])\n if from_ == \"\":\n self.publish('frontend',from_,session_id,'labware',name,'callbacks',return_dict)\n else:\n self.publish(from_,from_,session_id,'labware',name,'callbacks',return_dict)\n return return_dict\n\n\n def remove_callback(self, from_, session_id, name, param):\n \"\"\"\n name: name of driver\n param: name of callback to remove\n \"\"\"\n print(datetime.datetime.now(),' - LabwareClient.remove_callback:')\n print('\\n\\targs: ',locals(),'\\n')\n return_dict = 
self.driver_dict[name].remove_callback(param)\n        if from_ == \"\":\n            self.publish('frontend',from_,session_id,'labware',name,'callbacks',return_dict)\n        else:\n            self.publish(from_,from_,session_id,'labware',name,'callbacks',return_dict)\n        return return_dict\n\n\n    def flow(self, from_, session_id, name, param):\n        \"\"\"\n        name: name of driver\n        param: n/a\n        \"\"\"\n        print(datetime.datetime.now(),' - LabwareClient.flow:')\n        print('\\n\\targs: ',locals(),'\\n')\n        return_dict = self.driver_dict.get(name).flow()\n        if from_ == \"\":\n            self.publish('frontend',from_,session_id,'labware',name,'flow',return_dict)\n        else:\n            self.publish(from_,from_,session_id,'labware',name,'flow',return_dict)\n        return return_dict\n\n\n    def clear_queue(self, from_, session_id, name, param):\n        \"\"\"\n        name: name of driver\n        param: n/a\n        \"\"\"\n        print(datetime.datetime.now(),' - LabwareClient.clear_queue:')\n        print('\\n\\targs: ',locals(),'\\n')\n        return_dict = self.driver_dict.get(name).clear_queue()\n        if from_ == \"\":\n            self.publish('frontend',from_,session_id,'labware',name,'clear_queue',return_dict)\n        else:\n            self.publish(from_,from_,session_id,'labware',name,'clear_queue',return_dict)\n        return return_dict\n\n\n    def driver_connect(self, from_, session_id, name, param):\n        \"\"\"\n        name: name of driver\n        param: n/a\n        \"\"\"\n        print(datetime.datetime.now(),' - LabwareClient.driver_connect:')\n        print('\\n\\targs: ',locals(),'\\n')\n        print('self.driver_dict: ',self.driver_dict)\n        print('self.driver_dict[',name,']: ',self.driver_dict[name])\n        self.driver_dict[name].connect(from_,session_id)\n\n\n    def driver_close(self, from_, session_id, name, param):\n        \"\"\"\n        name: name of driver\n        param: n/a\n        \"\"\"\n        print(datetime.datetime.now(),' - LabwareClient.driver_close:')\n        print('\\n\\targs: ',locals(),'\\n')\n        self.driver_dict.get(name).close(from_,session_id)\n\n\n    def meta_commands(self, from_, session_id, name, param):\n        \"\"\"\n        name: name of driver\n        param: n/a\n        \"\"\"\n        print(datetime.datetime.now(),' - LabwareClient.meta_commands:')\n        print('\\n\\targs: ',locals(),'\\n')\n        return_list = list(self.meta_dict)\n        if from_ == \"\":\n            self.publish('frontend',from_,session_id,'labware',name,'meta_commands',return_list)\n        else:\n            self.publish(from_,from_,session_id,'labware',name,'meta_commands',return_list)\n        return return_list\n\n\n    def meta_command(self, from_, session_id, data):\n        \"\"\"\n\n        data should be in the form:\n\n        {\n            'name': name,\n            'message': value\n        }\n\n        where name is the name of the driver or None if n/a,\n\n        and value is one of two forms:\n\n        1. string\n\n        2. {command:params}\n           params --> {param1:value, ... 
, paramN:value}\n\n\n        \"\"\"\n        print(datetime.datetime.now(),' - LabwareClient.meta_command:')\n        print('\\n\\targs: ',locals(),'\\n')\n        if isinstance(data, dict):\n            name = data['name']\n            value = data['message']\n            if name in self.driver_dict:\n                if isinstance(value, dict):\n                    command = list(value)[0]\n                    params = value[command]\n                    try:\n                        self.meta_dict[command](from_,session_id,name,params)\n                    except:\n                        if from_ == \"\":\n                            self.publish('frontend',from_,session_id,'labware',name,'error',str(sys.exc_info()))\n                        else:\n                            self.publish(from_,from_,session_id,'labware',name,'error',str(sys.exc_info()))\n                        print(datetime.datetime.now(),' - meta_command error: ',str(sys.exc_info()))\n                elif isinstance(value, str):\n                    command = value\n                    try:\n                        self.meta_dict[command](from_,session_id,name,None)\n                    except:\n                        if from_ == \"\":\n                            self.publish('frontend',from_,session_id,'labware',name,'error',str(sys.exc_info()))\n                        else:\n                            self.publish(from_,from_,session_id,'labware',name,'error',str(sys.exc_info()))\n                        print(datetime.datetime.now(),' - meta_command error: ',sys.exc_info())\n            else:\n                if isinstance(value, dict):\n                    command = list(value)[0]\n                    params = value[command]\n                    try:\n                        self.meta_dict[command](from_,session_id,None,params)\n                    except:\n                        if from_ == \"\":\n                            self.publish('frontend',from_,session_id,'labware',name,'error',str(sys.exc_info()))\n                        else:\n                            self.publish(from_,from_,session_id,'labware',name,'error',str(sys.exc_info()))\n                        print(datetime.datetime.now(),' - meta_command error, name not in drivers: ',sys.exc_info())\n                elif isinstance(value, str):\n                    command = value\n                    try:\n                        self.meta_dict[command](from_,session_id,None,None)\n                    except:\n                        if from_ == \"\":\n                            self.publish('frontend',from_,session_id,'labware','None','error',str(sys.exc_info()))\n                        else:\n                            self.publish(from_,from_,session_id,'labware','None','error',str(sys.exc_info()))\n                        print(datetime.datetime.now(),' - meta_command error, name not in drivers: ',sys.exc_info())\n\n\n    def send_command(self, from_, session_id, data):\n        \"\"\"\n        data:\n        {\n            'name': driver name\n            'message': message string or { message : {param:values} } <--- the part the driver cares about\n        }\n        \"\"\"\n        print(datetime.datetime.now(),' - LabwareClient.send_command:')\n        print('\\n\\targs: ',locals(),'\\n')\n        if isinstance(data, dict):\n            name = data['name']\n            value = data['message']\n            if name in self.driver_dict:\n                try:\n                    self.driver_dict[name].send_command(session_id,value)\n                except:\n                    if from_ == \"\":\n                        self.publish('frontend',from_,session_id,'labware',name,'error',str(sys.exc_info()))\n                    else:\n                        self.publish(from_,from_,session_id,'labware',name,'error',str(sys.exc_info()))\n                    print(datetime.datetime.now(),' - send_command error: ',sys.exc_info())\n            else:\n                if from_ == \"\":\n                    self.publish('frontend',from_,session_id,'labware','None','error',str(sys.exc_info()))\n                else:\n                    self.publish(from_,from_,session_id,'labware','None','error',str(sys.exc_info()))\n                print(datetime.datetime.now(),' - send_command_error, name not in drivers: ',sys.exc_info())\n\n\n    def _make_connection(self, url_protocol='ws', url_domain='0.0.0.0', url_port=8080, url_path='ws', debug=False, debug_wamp=False):\n        print(datetime.datetime.now(),' - LabwareClient._make_connection:')\n        print('\\n\\targs: ',locals(),'\\n')\n        if self.loop.is_running():\n            print('self.loop is running. 
stopping loop now')\n            self.loop.stop()\n        print(self.transport_factory)\n        coro = self.loop.create_connection(self.transport_factory, url_domain, url_port)\n        self.transport, self.protocol = self.loop.run_until_complete(coro)\n        #protocoler.set_outer(self)\n        if not self.loop.is_running():\n            print('about to call self.loop.run_forever()')\n            self.loop.run_forever()\n\n\n    def connect(self, url_protocol='ws', url_domain='0.0.0.0', url_port=8080, url_path='ws', debug=False, debug_wamp=False, keep_trying=True, period=5):\n        print(datetime.datetime.now(),' - LabwareClient.connect:')\n        print('\\n\\targs: ',locals(),'\\n')\n        if self.transport_factory is None:\n            url = url_protocol+\"://\"+url_domain+':'+str(url_port)+'/'+url_path\n\n            self.transport_factory = websocket.WampWebSocketClientFactory(self.session_factory,\n                                                                          url=url,\n                                                                          debug=debug,\n                                                                          debug_wamp=debug_wamp)\n\n            self.session_factory._handshake = self.handshake\n            self.session_factory._dispatch_message = self.dispatch_message\n\n        if not keep_trying:\n            try:\n                print('\\nLabware attempting crossbar connection\\n')\n                self._make_connection()\n            except:\n                print('crossbar connection attempt error:\\n',sys.exc_info())\n                pass\n        else:\n            while True:\n                while not self.session_factory._crossbar_connected:\n                    try:\n                        print('\\nLabware attempting crossbar connection\\n')\n                        self._make_connection()\n                    except KeyboardInterrupt:\n                        self.session_factory._crossbar_connected = True\n                    except:\n                        print('crossbar connection attempt error:\\n',sys.exc_info())\n                        pass\n                    finally:\n                        print('\\nCrossbar connection failed, sleeping for',period,'seconds\\n')\n                        time.sleep(period)\n        \n\n    def disconnect(self):\n        print(datetime.datetime.now(),' - LabwareClient.disconnect:')\n        print('\\n\\targs: ',locals(),'\\n')\n        self.transport.close()\n        self.transport_factory = None\n\n\n\n    \n    \n\n\n\nif __name__ == '__main__':\n\n    try:\n        #session_factory = wamp.ApplicationSessionFactory()\n        #session_factory.session = WampComponent\n        #session_factory._myAppSession = None\n\n        #url = \"ws://0.0.0.0:8080/ws\"\n        #transport_factory = websocket.WampWebSocketClientFactory(session_factory,\n        #                                                         url=url,\n        #                                                         debug=False,\n        #                                                         debug_wamp=False)\n        #loop = asyncio.get_event_loop()\n\n        print('\\nBEGIN INIT...\\n')\n\n        # TRYING THE FOLLOWING IN INSTANTIATE OBJECTS vs here\n        # INITIAL SETUP\n        print(datetime.datetime.now(),' - INITIAL SETUP - publisher, harness, subscriber ','* * '*10)\n        labware_client = LabwareClient()\n\n\n        # INSTANTIATE DRIVERS\n        print(datetime.datetime.now(),' - INSTANTIATE DRIVERS - labbie_driver ','* * '*10)\n        labbie_driver = LabwareDriver()\n\n\n        # ADD DRIVERS\n        print(datetime.datetime.now(),' - ADD DRIVERS ','* * '*10)    \n        labware_client.add_driver(labware_client.id,'','labware',labbie_driver)\n        print(labware_client.drivers(labware_client.id,'',None,None))\n\n        # DEFINE CALLBACKS\n        #\n        # data_dict format:\n        #\n        #\n        #\n        #\n        #\n        print(datetime.datetime.now(),' - DEFINE CALLBACKS ','* * '*10)\n        def frontend(name, from_, session_id, data_dict):\n            \"\"\"\n            \"\"\"\n            print(datetime.datetime.now(),' - labware_client.frontend')\n            print('\\n\\targs: ',locals(),'\\n')\n            dd_name = list(data_dict)[0]\n            dd_value = data_dict[dd_name]\n            labware_client.publish('frontend',from_,session_id,'labware',name,dd_name,dd_value)\n            \n\n        def driver(name, from_, session_id, data_dict):\n            \"\"\"\n            \"\"\"\n            print(datetime.datetime.now(),' - labware_client.driver')\n            print('\\n\\targs: ',locals(),'\\n')\n            dd_name = list(data_dict)[0]\n            dd_value = data_dict[dd_name]\n            
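# Assumed shape (illustrative, not from the original source): data_dict holds a\n            # single {message: param} pair, e.g. {'result': 'connected'}, so dd_name and\n            # dd_value pick up that one key and value before being re-published below.\n            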
labware_client.publish('driver',from_,session_id,'labware',name,dd_name,dd_value)\n\n\n        def bootstrapper(name, from_, session_id, data_dict):\n            \"\"\"\n            \"\"\"\n            print(datetime.datetime.now(),' - labware_client.bootstrapper')\n            print('\\n\\targs: ',locals(),'\\n')\n            dd_name = list(data_dict)[0]\n            dd_value = data_dict[dd_name]\n            labware_client.publish('bootstrapper','',session_id,'labware',name,dd_name,dd_value)\n\n\n        def labware(name, from_, session_id, data_dict):\n            \"\"\"\n            \"\"\"\n            print(datetime.datetime.now(),' - labware_client.labware')\n            print('\\n\\targs: ',locals(),'\\n')\n            dd_name = list(data_dict)[0]\n            dd_value = data_dict[dd_name]\n            labware_client.publish('labware','',session_id,'labware',name,dd_name,dd_value)\n\n\n\n        def none(name, from_, session_id, data_dict):\n            \"\"\"\n            \"\"\"\n            print(datetime.datetime.now(),' - labware_client.none_cb')\n            print('\\n\\targs: ',locals(),'\\n')\n            dd_name = list(data_dict)[0]\n            dd_value = data_dict[dd_name]\n            if from_ != session_id:\n                labware_client.publish('frontend',from_,session_id,'labware',name,dd_name,dd_value)\n                labware_client.publish(from_,from_,session_id,'labware',name,dd_name,dd_value)\n            else:\n                # next line just for testing\n                labware_client.publish('frontend',from_,session_id,'labware',name,dd_name,dd_value)\n        \n        # ADD CALLBACKS\n        labware_client.add_callback('frontend','','labware', {frontend:['frontend']})\n        labware_client.add_callback('driver','','labware', {driver:['driver']})\n        labware_client.add_callback('bootstrapper','','labware', {bootstrapper:['bootstrapper']})\n        labware_client.add_callback('labware','','labware', {labware:['labware']})\n        # none is for debugging\n        labware_client.add_callback('frontend','','labware', {none:['None']})\n\n\n        # ADD METACALLBACKS\n        print(datetime.datetime.now(),' - DEFINE AND ADD META-CALLBACKS ','* * '*10)\n        def on_connect(from_,session_id):\n            print(datetime.datetime.now(),' - labware_client.on_connect')\n            print('\\n\\targs: ',locals(),'\\n')\n            labware_client.publish(from_,from_,session_id,'connect','labware','result','connected')\n\n        def on_disconnect(from_,session_id):\n            print(datetime.datetime.now(),' - labware_client.on_disconnect')\n            print('\\n\\targs: ',locals(),'\\n')\n            labware_client.publish(from_,from_,session_id,'connect','labware','result','disconnected')\n\n        def on_empty_queue(from_,session_id):\n            print(datetime.datetime.now(),' - labware_client.on_empty_queue')\n            print('\\n\\targs: ',locals(),'\\n')\n            labware_client.publish(from_,from_,session_id,'queue','labware','result','empty')\n\n        labware_client.set_meta_callback(labware_client.id,'','labware',{'on_connect':on_connect})\n        labware_client.set_meta_callback(labware_client.id,'','labware',{'on_disconnect':on_disconnect})\n        labware_client.set_meta_callback(labware_client.id,'','labware',{'on_empty_queue':on_empty_queue})\n\n        # CONNECT TO DRIVERS\n\n        print('\\nEND INIT...\\n')\n\n        labware_client.connect()\n\n    except KeyboardInterrupt:\n        pass\n    finally:\n        print('ALL DONE!')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "labware_client.py", "file_name": "labware_client.py", "file_ext": "py", "file_size_in_byte": 31884, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "autobahn.asyncio.wamp.ApplicationSession", "line_number": 22, "usage_type": "attribute"}, {"api_name": "autobahn.asyncio.wamp", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "attribute"}, 
{"api_name": "datetime.datetime.now", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "attribute"}, {"api_name": "asyncio.coroutine", "line_number": 32, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 104, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 104, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 140, "usage_type": "call"}, {"api_name": "autobahn.asyncio.wamp.ApplicationSessionFactory", "line_number": 142, "usage_type": "call"}, {"api_name": "autobahn.asyncio.wamp", "line_number": 142, "usage_type": "name"}, {"api_name": "asyncio.get_event_loop", "line_number": 151, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 156, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 156, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 159, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 159, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 166, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 166, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 166, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 169, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 169, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 169, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 171, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 171, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 171, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 176, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 176, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 179, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 215, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 215, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 229, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 229, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 238, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 238, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 250, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 250, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 261, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 261, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 266, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 266, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 267, "usage_type": "call"}, {"api_name": 
"datetime.datetime.now", "line_number": 271, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 271, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 272, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 274, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 274, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 274, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 276, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 276, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 278, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 278, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 287, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 287, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 304, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 304, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 320, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 320, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 336, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 336, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 351, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 351, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 363, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 363, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 378, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 378, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 393, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 393, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 408, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 408, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 423, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 423, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 438, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 438, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 450, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 450, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 460, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 460, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 491, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 491, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 504, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 506, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 507, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 507, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 507, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 514, "usage_type": "call"}, {"api_name": "sys.exc_info", 
"line_number": 516, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 517, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 517, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 517, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 526, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 528, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 529, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 529, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 529, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 536, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 538, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 539, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 539, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 539, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 550, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 550, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 560, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 562, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 563, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 563, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 563, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 566, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 568, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 569, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 569, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 569, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 573, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 573, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 588, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 588, "usage_type": "attribute"}, {"api_name": "autobahn.asyncio.websocket.WampWebSocketClientFactory", "line_number": 593, "usage_type": "call"}, {"api_name": "autobahn.asyncio.websocket", "line_number": 593, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 606, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 617, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 621, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 625, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 625, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 655, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 655, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 660, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 660, "usage_type": "attribute"}, {"api_name": "labware_driver.LabwareDriver", "line_number": 661, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 665, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 665, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 677, "usage_type": "call"}, {"api_name": "datetime.datetime", 
"line_number": 677, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 681, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 681, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 691, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 691, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 701, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 701, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 711, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 711, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 722, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 722, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 743, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 743, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 745, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 745, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 750, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 750, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 755, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 755, "usage_type": "attribute"}]} +{"seq_id": "373338670", "text": "#!/usr/bin/env python3\n\n# Code Jam 2016 - Qualification Round - Problem C\n# Copyright (C) 2016 Andrew Donnellan\n\nimport sys\n\nfrom functools import wraps\nimport errno\nimport os\nimport signal\n\nclass TimeoutError(Exception):\n pass\n\ndef timeout(seconds=10, error_message=os.strerror(errno.ETIME)):\n def decorator(func):\n def _handle_timeout(signum, frame):\n raise TimeoutError(error_message)\n\n def wrapper(*args, **kwargs):\n signal.signal(signal.SIGALRM, _handle_timeout)\n signal.alarm(seconds)\n try:\n result = func(*args, **kwargs)\n finally:\n signal.alarm(0)\n return result\n\n return wraps(func)(wrapper)\n\n return decorator\n\ninp = [int(x) for x in sys.stdin.readlines()[1].split()]\n\n@timeout(3)\ndef prove_jamcoin(jamcoin):\n interpretations = [int(jamcoin, n) for n in range(2,11)]\n divisors = []\n for i, interpretation in enumerate(interpretations):\n divisor = None\n for j in range(2, interpretation // 2):\n if interpretation % j == 0:\n divisor = j\n break\n if not divisor:\n #print(\"No divisor for \" + str(interpretation))\n raise Exception\n else:\n divisors.append(divisor)\n return divisors\n\njamcoins = []\ncount = 0\nwhile len(jamcoins) < inp[1]:\n jamcoin = \"1\" + bin(count)[2:].zfill(inp[0] - 2) + \"1\"\n #print(\"Proving jamcoin \" + jamcoin)\n try:\n jamcoins.append((jamcoin, prove_jamcoin(jamcoin)))\n except (Exception, TimeoutError):\n pass\n count += 1\n\nprint(\"Case #1:\")\nfor (jamcoin, proof) in jamcoins:\n print(\"{} {}\".format(jamcoin, \" \".join([str(p) for p in proof])))\n", "sub_path": "codes/CodeJamCrawler/16_0_3/ajdlinux/jamcoin.py", "file_name": "jamcoin.py", "file_ext": "py", "file_size_in_byte": 1737, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.strerror", "line_number": 16, "usage_type": "call"}, {"api_name": "errno.ETIME", "line_number": 16, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 22, "usage_type": 
"call"}, {"api_name": "signal.SIGALRM", "line_number": 22, "usage_type": "attribute"}, {"api_name": "signal.alarm", "line_number": 23, "usage_type": "call"}, {"api_name": "signal.alarm", "line_number": 27, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.stdin.readlines", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 34, "usage_type": "attribute"}]} +{"seq_id": "347939872", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nLicense: The usage of the code is limited to research. Commercial use of the code is not allowed.\n\"\"\"\n\nimport re\nimport sys\nfrom collections import Counter\n\nimport nltk\nfrom nltk.corpus import stopwords, wordnet\nfrom nltk.tag.stanford import StanfordPOSTagger\n\n#from DescriptionPerDescriptionEvaluation import Documents, Evaluation\n\n#path_to_model = \"../stanford-postagger-full-2016-10-31/models/english-bidirectional-distsim.tagger\"\n#path_to_jar = \"../stanford-postagger-full-2016-10-31/stanford-postagger.jar\"\n\npath_to_model = \"../stanford-postagger-2016-10-31/models/english-bidirectional-distsim.tagger\"\npath_to_jar = \"../stanford-postagger-2016-10-31/stanford-postagger.jar\"\n\nst_tagger = StanfordPOSTagger(path_to_model, path_to_jar)\n\n\"\"\"\n This class is able to extract app features from descriptions. This algorithm is optimized for the following apps\n\"\"\"\n\n\nclass SAFE:\n def __init__(self):\n self.debug = False\n self.test = False # use a test string instead of the whole description\n self.feature_word_threshold = 4\n self.test_string = \"\"\"View microsoft office documents, PDFs, photos, videos, and more\"\"\".lower()\n self.feature_list = []\n self.raw_feature_list = []\n self.subordinate_conjunctions = [\"after\", \"although\", \"because\", \"before\", \"even if\", \"even though\", \"if\",\n \"in order that\tonce\", \"provided that\", \"rather than\", \"since\", \"so that\",\n \"than\", \"that\", \"though\", \"unless\", \"until\", \"when\", \"whenever\", \"where\",\n \"whereas\", \"wherever\", \"whether\", \"while\", \"why\"] # \"as\"\n self.relative_pronouns = [\"that\", \"which\", \"whichever\", \"who\", \"whoever\", \"whom\", \"whose\", \"whosever\",\n \"whomever\"]\n self.review_filter_keywords = [\"crash\", \"crashed\", \"crashes\", \"app\", \"apps\", \"version\", \"gmail\", \"love\", \"hate\",\n \"please\", \"fix\", \"easy\", \"use\", \"have\", \"is\", \"using\", \"do\", \"help\", \"great\",\n \"think\", \"need\", \"version\", \"versions\", \"does\", \"piss\", \"good\", \"trash\", \"thank\",\n \"bug\", \"try\", \"well\",\"ios\", \"yahoo\"]\n\n self.review_filter_keywords + stopwords.words('english')\n self.stoplist = set(stopwords.words('english'))\n self.stoplist = {\"to\"}\n self.symbol_pattern = r\"[a-zA-Z]\"\n self.bracket_pattern = r\"\\([^)]*\\)\"\n self.email_pattern = \"r'[\\w\\.-]+@[\\w\\.-]+'\"\n grammar = r\"\"\"\n NP: {+}\n VN: {+}\n VPN:{??+}\n VAN:{+}\n VIN:{?}\n NJN:{+}\n NN:{+}\n \"\"\"\n review_grammar = r\"\"\"\n VN: {+}\n VPN:{??+}\n VIN:{?}\n NN:{+}\n \"\"\"\n self.lemmatizer = nltk.WordNetLemmatizer()\n self.parser = nltk.RegexpParser(grammar)\n self.review_parser = nltk.RegexpParser(review_grammar)\n self.words_not_to_lemmatize = [\"sms\", \"ics\", \"use\", \"uses\"]\n self.sentence_filter = [\"www\", \"http\", \"https\", \".com\", \".de\"]\n self.sentence_filter_quotes = [\"\\\"\", \"“\", \"”\"]\n self.app_name = \"evernote\".lower()\n self.description = \"\"\n\n def extract_sentences(self, input_string):\n 
split_sentences = input_string.split(\"\\n\")\n        tmp_sentences = []\n\n        sentences = []\n        for sentence in split_sentences:\n            for s in nltk.sent_tokenize(sentence):\n                tmp_sentences.append(s)\n\n        for sentence in tmp_sentences:\n            for part in sentence.split(\":\"):\n                for s in part.split(\" - \"):\n                    sentences.append(s.strip())\n\n        return sentences\n\n    def get_wordnet_pos(self, tag):\n        if tag.startswith('J'):\n            return wordnet.ADJ\n        elif tag.startswith('V'):\n            return wordnet.VERB\n        elif tag.startswith('N'):\n            return wordnet.NOUN\n        elif tag.startswith('R'):\n            return wordnet.ADV\n        else:\n            return wordnet.NOUN\n\n    def lemmatize_sentences(self, tokens_per_sentence):\n        lemmatized_tokens = []\n        for tokens in tokens_per_sentence:\n            tokens_per_sentence_tmp = []\n            for (token, tag) in tokens:\n                tokens_per_sentence_tmp.append(self.lemmatizer.lemmatize(token, self.get_wordnet_pos(tag)))\n            lemmatized_tokens.append(tokens_per_sentence_tmp)\n        return lemmatized_tokens\n\n    def remove_text_in_brackets(self, sentences):\n        p = re.compile(self.bracket_pattern)\n        processed_sentences = []\n        for sentence in sentences:\n            processed_sentences.append(re.sub(p, '', sentence))\n        return processed_sentences\n\n    def remove_symbols(self, input_list):\n        \"\"\"test the first two and the last char of a sentence. If it does not contain a letter remove that part\"\"\"\n        output_list = []\n        for s in input_list:\n            s = s.strip()\n            s = re.sub(\":\", \"\", s)\n            if s and len(s) >= 2:\n                last_char_pos = len(s) - 1\n                # check the first two chars and transform the string if necessary\n                if not re.match(self.symbol_pattern, s[0]):\n                    if not re.match(self.symbol_pattern, s[1]):\n                        s = s[2:]\n                        last_char_pos -= 2\n                    else:\n                        s = s[1:]\n                        last_char_pos -= 1\n\n                # check the last char and transform the string if necessary\n                if len(s) <= last_char_pos:\n                    if not re.match(self.symbol_pattern, s[last_char_pos]):\n                        s = s[:last_char_pos]\n\n                output_list.append(s)\n\n        if self.debug:\n            print(\"symbol remover------->\")\n            for output in output_list:\n                print(output)\n            print(\"<------symbol remover\\n\")\n        return output_list\n\n    def remove_symbols_from_review(self, input_list):\n        output_list = []\n        for s in input_list:\n            s = s.strip()\n            s = re.sub(r\"\\W+\", \" \", s)\n            s = re.sub(\"\\\"\", \"\", s)\n\n            output_list.append(s)\n\n        return output_list\n\n    def remove_subordinate_clauses(self, sentences):\n        processed_sentences = []\n        for sentence in sentences:\n            # check if there is a subordinate conjunction in the beginning or in the middle of the sentence\n            if not any(word in sentence.split(\" \") for word in self.subordinate_conjunctions):\n                processed_sentences.append(sentence)\n            # for word in sentence.split(\" \"):\n            #     if word in self.subordinate_conjunctions:\n            #         print(word)\n            #         print(sentence)\n\n        return processed_sentences\n\n    def filter_sentences(self, sentences):\n        processed_sentences = []\n        # remove sentences that e.g. 
contain URLs because they are used to address the contact to the publisher\n for sentence in sentences:\n if not any(word in sentence for word in self.sentence_filter):\n processed_sentences.append(sentence)\n\n # remove sentences that contain quotations since they are reviews/statements of external people\n tmp_processed_sentences = processed_sentences\n processed_sentences = []\n for sentence in tmp_processed_sentences:\n quote_counter = 0\n for quote in self.sentence_filter_quotes:\n quote_counter += sentence.count(quote)\n if quote_counter < 2:\n processed_sentences.append(sentence)\n\n # remove sentences that contain email addresses as they are used for providing a contact to the publisher\n tmp_processed_sentences = processed_sentences\n processed_sentences = []\n for sentence in tmp_processed_sentences:\n if re.search(self.email_pattern, sentence) is None:\n processed_sentences.append(sentence)\n\n return processed_sentences\n\n def tokenize_sentence(self, sentences):\n index = 0\n tokens_per_sentence = []\n if type(sentences) is list:\n for sentence in sentences:\n tokens_per_sentence.append(nltk.word_tokenize(sentence))\n index += 1\n return tokens_per_sentence\n elif type(sentences) is str:\n tokens_per_sentence.append(nltk.word_tokenize(sentences))\n return tokens_per_sentence\n\n def tokenize_reviews(self, sentences):\n index = 0\n tokens_per_sentence = []\n if type(sentences) is list:\n for sentence in sentences:\n tokens_per_sentence.append(sentence.split())\n index += 1\n return tokens_per_sentence\n elif type(sentences) is str:\n tokens_per_sentence.append(sentences.split())\n return tokens_per_sentence\n\n def expand_all_contractions(self, sentences):\n \"\"\"Expands all contractions within the documents\n :returns documents with expanded contractions\"\"\"\n expanded_sentences = []\n for sentence in sentences:\n expanded_tokens = \"\"\n for token in sentence.split():\n expanded_tokens += (\" \" + (self.expand_contraction(token)))\n expanded_sentences.append(expanded_tokens)\n\n return expanded_sentences\n\n def expand_contraction(self, word):\n \"\"\"expands word if word is a contraction\n :param word to check\n :returns expanded word\"\"\"\n contractions = {\n \"ain't\": \"am not\", # are not; is not; has not; have not\",\n \"aren't\": \"are not\", # ; am not\",\n \"can't\": \"cannot\",\n \"can't've\": \"cannot have\",\n \"'cause\": \"because\",\n \"could've\": \"could have\",\n \"couldn't\": \"could not\",\n \"couldn't've\": \"could not have\",\n \"didn't\": \"did not\",\n \"doesn't\": \"does not\",\n \"don't\": \"do not\",\n \"hadn't\": \"had not\",\n \"hadn't've\": \"had not have\",\n \"hasn't\": \"has not\",\n \"haven't\": \"have not\",\n \"he'd\": \"he had\", # , / he would\",\n \"he'd've\": \"he would have\",\n \"he'll\": \"he shall\", # / he will\",\n \"he'll've\": \"he shall have\", # / he will have\",\n \"he's\": \"he has\", # / he is\",\n \"how'd\": \"how did\",\n \"how'd'y\": \"how do you\",\n \"how'll\": \"how will\",\n \"how's\": \"how has\", # / how is / how does\",\n \"i'd\": \"i had\", # / I would\",\n \"i'd've\": \"i would have\",\n \"i'll\": \"i will\", # / I shal\",\n \"i'll've\": \"i will have\", # / I shall have\",\n \"i'm\": \"i am\",\n \"i've\": \"i have\",\n \"isn't\": \"is not\",\n \"it'd\": \"it would\", # / it had\",\n \"it'd've\": \"it would have\",\n \"it'll\": \"it will\", # / it shall\",\n \"it'll've\": \"it will have\", # / it shall have\",\n \"it's\": \"it is\", # / it has\",\n \"let's\": \"let us\",\n \"ma'am\": \"madam\",\n \"mayn't\": 
\"may not\",\n \"might've\": \"might have\",\n \"mightn't\": \"might not\",\n \"mightn't've\": \"might not have\",\n \"must've\": \"must have\",\n \"mustn't\": \"must not\",\n \"mustn't've\": \"must not have\",\n \"needn't\": \"need not\",\n \"needn't've\": \"need not have\",\n \"o'clock\": \"of the clock\",\n \"oughtn't\": \"ought not\",\n \"oughtn't've\": \"ought not have\",\n \"shan't\": \"shall not\",\n \"sha'n't\": \"shall not\",\n \"shan't've\": \"shall not have\",\n \"she'd\": \"she had\", # / she would\",\n \"she'd've\": \"she would have\",\n \"she'll\": \"she shall\", # / she will\",\n \"she'll've\": \"she shall have\", # / she will have\",\n \"she's\": \"she is\", # / she has\",\n \"should've\": \"should have\",\n \"shouldn't\": \"should not\",\n \"shouldn't've\": \"should not have\",\n \"so've\": \"so have\",\n \"so's\": \"so is\", # / so as\",\n \"that'd\": \"that would\", # / that had\",\n \"that'd've\": \"that would have\",\n \"that's\": \"that is\", # / that has\",\n \"there'd\": \"there had\", # / / there would\",\n \"there'd've\": \"there would have\",\n \"there's\": \"there is\", # / there has\",\n \"they'd\": \"they had\", # / they would\",\n \"they'd've\": \"they would have\",\n \"they'll\": \"they shall / they will\",\n \"they'll've\": \"they will have\", # / they shall have\",\n \"they're\": \"they are\",\n \"they've\": \"they have\",\n \"to've\": \"to have\",\n \"wasn't\": \"was not\",\n \"we'd\": \"we had \", # / we would\",\n \"we'd've\": \"we would have\",\n \"we'll\": \"we will\",\n \"we'll've\": \"we will have\",\n \"we're\": \"we are\",\n \"we've\": \"we have\",\n \"weren't\": \"were not\",\n \"what'll\": \"what will\", # / what will\",\n \"what'll've\": \"what will have\", # / what shall have\",\n \"what're\": \"what are\",\n \"what's\": \"what is\", # / what has\",\n \"what've\": \"what have\",\n \"when's\": \"when is \", # / when has\",\n \"when've\": \"when have\",\n \"where'd\": \"where did\",\n \"where's\": \"where is\", # / where has\",\n \"where've\": \"where have\",\n \"who'll\": \"who will\", # / who will\",\n \"who'll've\": \"who will have \", # / who will have\",\n \"who's\": \"who is\", # / who has\",\n \"who've\": \"who have\",\n \"why's\": \"why is\", # / why has\",\n \"why've\": \"why have\",\n \"will've\": \"will have\",\n \"won't\": \"will not\",\n \"won't've\": \"will not have\",\n \"would've\": \"would have\",\n \"wouldn't\": \"would not\",\n \"wouldn't've\": \"would not have\",\n \"y'all\": \"you all\",\n \"y'all'd\": \"you all would\",\n \"y'all'd've\": \"you all would have\",\n \"y'all're\": \"you all are\",\n \"y'all've\": \"you all have\",\n \"you'd\": \"you had\", # / you would\",\n \"you'd've\": \"you would have\",\n \"you'll\": \"you will\", # / you shall\",\n \"you'll've\": \"you will have\", # / you shall have\",\n \"you're\": \"you are\",\n \"you've\": \"you have\"\n }\n\n if word in contractions.keys():\n word = contractions[word]\n return word\n\n def remove_stopwords(self, tokens_per_sentence, extract_from_review=False):\n stopped_tokens_per_sentence = []\n self.stoplist.add(self.app_name)\n if extract_from_review:\n for item in self.review_filter_keywords:\n self.stoplist.add(item)\n self.stoplist |= set(stopwords.words('english'))\n\n for tokens in tokens_per_sentence:\n tokens_per_sentence_tmp = []\n for token in tokens:\n if token not in self.stoplist:\n tokens_per_sentence_tmp.append(token)\n else:\n tokens_per_sentence_tmp.append(\" \")\n # print(\"tokens_per_sentence_tmp: \", tokens_per_sentence_tmp)\n 
stopped_tokens_per_sentence.append(tokens_per_sentence_tmp)\n\n return stopped_tokens_per_sentence\n\n def pos_tag_tokenized_sentences(self, tokens_per_sentence):\n pos_tags_per_sentence = []\n for tokens in tokens_per_sentence:\n # just create pos tags if the token array is not empty\n if tokens:\n pos_tags_per_sentence.append(st_tagger.tag(tokens))\n # print(nltk.pos_tag(tokens))\n\n if self.debug:\n for d in pos_tags_per_sentence:\n for s in d:\n print(s[0] + \"_\" + s[1], end=\" \")\n print(\"\\n\")\n\n return pos_tags_per_sentence\n\n def extract_features_from_complex_lists(self, tokens_per_sentence):\n \"\"\"extracts features from complex lists in sentences like:\n send and receive videos, messages and emojis\n Features:\n - send videos\n - send message\n - send emojis\n - receive videos\n - receive message\n - receive emojis\n\n The extracted features are added to self.feature_list.\n Sentences that contain those features are removed from tokens_per_sentence.\n The remaining sentences are returned for further processing.\n \"\"\"\n\n # 1. check for indicators of such sentences by counting the conjunction \"and\" and \"or\"\n \"\"\" possible cases\n case 1: \"Write, collect and capture ideas as searchable notes, notebooks, checklists and to-do lists\"\n case 2: \"Discuss and annotate notes and drafts\"\n case 3: \"Use camera capture to easily scan and comment on pieces of paper, including printed documents,\n business cards, handwriting and sketches\"\n case 4: \"to let you message and call friends and family, so you don't have to pay for every message or call\"\n case 5: \"send and receive attachments\"\n case 6: \"View documents, PDFs, photos, videos, and more\"\n \"\"\"\n remaining_sentences = []\n for tokens in tokens_per_sentence:\n conj_counter = Counter([j if i == 'and' and j == \"CC\" else None for i, j in tokens])\n cc_on_right_side = None\n is_case_1 = False\n is_case_2 = False\n is_case_3 = False\n is_case_4 = False\n is_case_5 = False\n is_case_6 = False\n if conj_counter[\"CC\"] >= 1:\n # check which case we have\n try:\n left_side_tokens = tokens[:tokens.index(('and', 'CC'))]\n right_side_tokens = tokens[tokens.index(('and', 'CC')):]\n if conj_counter[\"CC\"] == 1 and len(right_side_tokens) < len(left_side_tokens):\n cc_on_right_side = True\n\n except ValueError:\n left_side_tokens = None\n right_side_tokens = None\n\n # check for case 1-4\n if left_side_tokens:\n # check if there is a comma on the left side, which would indicate case 1\n # if there are no commas, we have to check if we have case 2 or 3\n left_side_comma_counter = Counter([j if \",\" == j else None for i, j in left_side_tokens])\n right_side_comma_counter = Counter([j if \",\" == j else None for i, j in right_side_tokens])\n if conj_counter[\"CC\"] >= 2:\n if left_side_comma_counter[\",\"] > 0:\n is_case_1 = True\n elif left_side_comma_counter[\",\"] == 0 and len(\n left_side_tokens) < 3: # just allow a max. 
of 2 words\n is_case_2 = True\n elif left_side_comma_counter[\",\"] == 0 and right_side_comma_counter[\",\"] > 0:\n if right_side_comma_counter[\",\"] > 1:\n is_case_3 = True\n else:\n is_case_4 = True\n elif cc_on_right_side and left_side_comma_counter[\",\"] >= 1:\n is_case_6 = True\n # check for case 5\n # check if there is a verb on the left and on the right side of a conjunction\n try:\n left_side_is_verb = tokens[tokens.index(('and', 'CC')) - 1:tokens.index(('and', 'CC'))][0][\n 1] == \"VB\"\n right_side_is_verb = tokens[tokens.index(('and', 'CC')) + 1:tokens.index(('and', 'CC')) + 2][0][\n 1] == \"VB\"\n if left_side_is_verb and right_side_is_verb:\n is_case_5 = True\n except IndexError:\n # ignore\n is_case_5 = False\n\n if is_case_1:\n # print(\"CASE 1\")\n self.extract_from_case_1(tokens)\n elif is_case_2:\n # print(\"CASE 2\")\n self.extract_from_case_2(tokens)\n elif is_case_3:\n # print(\"CASE 3\")\n self.extract_from_case_3(tokens)\n elif is_case_4:\n # print(\"CASE 4\")\n self.extract_from_case_4(tokens)\n elif is_case_5:\n # print(\"CASE 5\")\n self.extract_from_case_5(tokens)\n elif is_case_6:\n # print(\"CASE 6\")\n self.extract_from_case_6(tokens)\n else:\n remaining_sentences.append(tokens)\n\n return remaining_sentences\n\n def extract_from_case_1(self, tokens):\n # remove POS tags and transform tokens to string\n left_side = \"\"\n for t in tokens[:tokens.index(('and', 'CC')) + 2]:\n left_side += \" %s\" % str(t[0]).lower()\n\n right_side = \"\"\n for t in tokens[tokens.index(('and', 'CC')) + 2:]:\n right_side += \" %s\" % str(t[0])\n\n starting_feature_words = left_side.split(\" and \")[0].split(\",\")\n starting_feature_words.append(left_side.split(\" and \")[1])\n starting_feature_words = [x.strip() for x in starting_feature_words]\n\n combining_features = right_side.split(\" and \")[0].split(\",\")\n combining_features.append(right_side.split(\" and \")[1])\n combining_features = [x.strip() for x in combining_features]\n\n for s in starting_feature_words:\n for c in combining_features:\n if s and c:\n # self.add_feature(\"%s %s\" % (s, c))\n self.add_raw_feature(\"%s %s\" % (s, c))\n # print(\"%s %s\" % (s, c))\n\n def extract_from_case_2(self, tokens):\n # the code of self.extract_from_case_1 also works for this case, but we still have to differentiate both\n # cases because the sentence structure is different\n self.extract_from_case_1(tokens)\n\n def extract_from_case_3(self, tokens):\n # remove POS tags and transform tokens to string\n left_side = \"\"\n for t in tokens[:tokens.index(('and', 'CC')) + 2]:\n left_side += \" %s\" % str(t[0]).lower()\n\n right_side = \"\"\n for t in tokens[tokens.index(('and', 'CC')) + 2:]:\n right_side += \" %s\" % str(t[0])\n\n no_of_words = len(left_side.split(\" and \")[0].split(\" \"))\n if no_of_words > 2: # just allow two word combination, if there are more words = substring to the last two\n # create a substring of the last two words in front of the word \"and\"\n left_side = \" \".join((left_side.rsplit(\" \", 4))[1:])\n starting_feature_words = left_side.split(\" and \")[0].split(\",\")\n starting_feature_words.append(left_side.split(\" and \")[1])\n starting_feature_words = [x.strip() for x in starting_feature_words]\n\n combining_features = right_side.split(\" and \")[0].split(\",\")\n combining_features.append(right_side.split(\" and \")[1])\n combining_features = [x.strip() for x in combining_features]\n\n for s in starting_feature_words:\n for c in combining_features:\n if s and c:\n # self.add_feature(\"%s %s\" % 
(s, c))\n                    self.add_raw_feature(\"%s %s\" % (s, c))\n                    # print(\"%s %s\" % (s, c))\n\n\n    def extract_from_case_4(self, tokens):\n        \"\"\"in this case we do not care about the right side of the comma, as we assume that\n        having a single comma & more than 3 words after it and before any other conjunction,\n        we do not have to combine the left side with the right one as they don't belong together\n        \"\"\"\n        # remove POS tags and transform tokens to string\n        allowed_pos_tags = (\"V\", \"J\", \"N\", \"C\")\n        left_side = \"\"\n        for t in tokens[:tokens.index(('and', 'CC')) + 2]:\n            if str(t[1]).startswith(allowed_pos_tags):\n                left_side += \" %s\" % str(t[0]).lower()\n            else:\n                left_side = \"\"\n\n        right_side = \"\"\n        for t in tokens[tokens.index(('and', 'CC')) + 2:tokens.index((',', ','))]:\n            right_side += \" %s\" % str(t[0])\n\n        starting_feature_words = left_side.split(\" and \")[0].split(\",\")\n        starting_feature_words.append(left_side.split(\" and \")[1])\n        starting_feature_words = [x.strip() for x in starting_feature_words]\n\n        combining_features = right_side.split(\" and \")[0].split(\",\")\n        # print(\"\\n\", tokens, \"\\n\")\n        if len(right_side.split(\" and \")) > 1:\n            combining_features.append(right_side.split(\" and \")[1])\n        combining_features = [x.strip() for x in combining_features]\n\n        for s in starting_feature_words:\n            for c in combining_features:\n                if s and c:\n                    # self.add_feature(\"%s %s\" % (s, c))\n                    self.add_raw_feature(\"%s %s\" % (s, c))\n                    # print(\"%s %s\" % (s, c))\n\n\n    def extract_from_case_5(self, tokens):\n        # remove POS tags and transform tokens to string\n        left_side = []\n        for t in tokens[:tokens.index(('and', 'CC')) + 2]:\n            if not t[0] == \"and\":\n                left_side.append(str(t[0]).lower().strip())\n\n        right_side = \"\"\n        for t in tokens[tokens.index(('and', 'CC')) + 2:]:\n            right_side += \" %s\" % str(t[0])\n\n        for l in left_side:\n            self.add_raw_feature(\"%s %s\" % (l.strip(), right_side.strip()))\n\n\n    def extract_from_case_6(self, tokens):\n        # remove POS tags and transform tokens to string\n        left_side = tokens[0][0]\n\n        right_side_str = \"\"\n        for t in tokens[1:]:\n            right_side_str += \" %s\" % str(t[0])\n\n        right_side = []\n        for cc in right_side_str.split(\"and\"):\n            for part in cc.split(\",\"):\n                part = part.strip()\n                if part:\n                    right_side.append(part)\n\n        for r in right_side:\n            self.add_raw_feature(\"%s %s\" % (left_side.strip(), r.strip()))\n\n\n    def chunk_sentences(self, pos_tagged_sentences):\n        pos_tagged_sentences += self.raw_feature_list\n        chunks = []\n        if self.debug:\n            print(\"chunk sentences --------------------->\")\n            for sentence in pos_tagged_sentences:\n                print(\"pos_tagged_sentences\", sentence)\n            print(\"<--------------------- chunk sentences\\n\")\n        for sentence in pos_tagged_sentences:\n            try:\n                result = self.parser.parse(sentence)\n                for subtree in result.subtrees():\n                    if subtree.label() == 'NP' or \\\n                            subtree.label() == 'VN' or \\\n                            subtree.label() == 'VPN' or \\\n                            subtree.label() == 'VAN' or \\\n                            subtree.label() == 'VIN' or \\\n                            subtree.label() == 'NN' or \\\n                            subtree.label() == 'NJN':\n                        feature = \"\"\n                        for element in subtree:\n                            if len(element) > 2:\n                                for e in element:\n                                    feature += \" %s\" % (str(e[0]).strip())\n                            else:\n                                feature += \" %s\" % (str(element[0]).strip())\n                        chunks.append(feature)\n                        if len(feature.strip().split(\" \")) >= 2: # do not add features that are just a single word\n                            self.add_feature(feature.strip().lower())\n            except:\n                print(\"err: \", sentence)\n                sys.stderr.write(str(sys.exc_info()) + '\\n')\n\n        return chunks\n\n\n    def chunk_review_sentences(self, 
pos_tagged_sentences):\n        pos_tagged_sentences += self.raw_feature_list\n        chunks = []\n        if self.debug:\n            print(\"chunk sentences --------------------->\")\n            for sentence in pos_tagged_sentences:\n                print(\"pos_tagged_sentences\", sentence)\n            print(\"<--------------------- chunk sentences\\n\")\n        for sentence in pos_tagged_sentences:\n            try:\n                result = self.review_parser.parse(sentence)\n                for subtree in result.subtrees():\n                    if subtree.label() == 'NP' or \\\n                            subtree.label() == 'VN' or \\\n                            subtree.label() == 'VPN' or \\\n                            subtree.label() == 'VIN' or \\\n                            subtree.label() == 'NN':\n                        feature = \"\"\n                        for element in subtree:\n                            if len(element) > 2:\n                                for e in element:\n                                    feature += \" %s\" % (str(e[0]).strip())\n                            else:\n                                feature += \" %s\" % (str(element[0]).strip())\n                        chunks.append(feature)\n                        if len(feature.strip().split(\" \")) >= 2: # do not add features that are just a single word\n                            self.add_feature(feature.strip().lower())\n            except:\n                print(\"err: \", sentence)\n                sys.stderr.write(str(sys.exc_info()) + '\\n')\n\n        return chunks\n\n\n    def post_filter_app_features(self):\n        \"\"\" This function\n\n        1. removes the features from the self.feature_list that have too many words.\n        As language ambiguity is very high, we cannot cover all possibilities in the choice of words and\n        sentence structures. Therefore, the final list might contain wrong entries, which are typically\n        long sentences. This method removes those self.feature_list entries that have more words than specified in\n        self.feature_word_threshold.\n\n        2. filters features that contain non-informative information. This method is keyword based and uses\n        keywords that the authors found by looking into extracted features.\n        For example the sentence: \"view documents and more\" resolves into the app features: \"view documents\" and\n        \"view more\", whereas the second feature is labeled as non-informative, since 'more' is not defined\n\n        3. filter app features that contain grammar subtrees because of their complexity\n\n        4. remove duplicates\n        \"\"\"\n        # 1.\n        self.feature_list = [feature for feature in self.feature_list if\n                             not len(feature.split(\" \")) > self.feature_word_threshold]\n\n        # 3.\n        self.feature_list = [feature for feature in self.feature_list if\n                             \"(\" not in feature]\n\n        # 4.\n        self.feature_list = set(self.feature_list)\n\n    def post_filter_app_features_from_reviews(self):\n        \"\"\" This function\n\n        1. removes the features from the self.feature_list that have too many words.\n        As language ambiguity is very high, we cannot cover all possibilities in the choice of words and\n        sentence structures. Therefore, the final list might contain wrong entries, which are typically\n        long sentences. This method removes those self.feature_list entries that have more words than specified in\n        self.feature_word_threshold.\n\n        2. filters features that contain non-informative information. This method is keyword based and uses\n        keywords that the authors found by looking into extracted features.\n        For example the sentence: \"view documents and more\" resolves into the app features: \"view documents\" and\n        \"view more\", whereas the second feature is labeled as non-informative, since 'more' is not defined\n\n        3. filter app features that contain grammar subtrees because of their complexity\n\n        4. do not allow word duplicates in one app feature like: \"email email\"\n\n        5. 
remove duplicates\n        \"\"\"\n        # 1.\n        self.feature_list = [feature for feature in self.feature_list if\n                             not len(feature.split(\" \")) > self.feature_word_threshold]\n\n        # 2.\n        # self.review_filter_keywords = stopwords.words('english')\n        # self.feature_list = [feature for feature in self.feature_list if\n        #                      not any(word in feature for word in self.review_filter_keywords)]\n\n        # 3.\n        self.feature_list = [feature for feature in self.feature_list if \"(\" not in feature]\n\n        # 4.\n        self.feature_list = [feature for feature in self.feature_list if feature.split()[0] != feature.split()[1]]\n\n        # 5.\n        self.feature_list = set(self.feature_list)\n\n    def print_final_features(self):\n        print(\"\\n Features:\")\n        for feature in set(self.feature_list):\n            print(feature)\n\n    def write_features_to_file(self):\n        with open(\"feature_file_appdescriptions.txt\", \"a\") as feature_file:\n            feature_file.write(\"\\n\\n-------\\n%s\\n------\\n\" % self.app_name)\n            for feature in self.feature_list:\n                feature_file.write(\"%s\\n\" % feature)\n\n    def debug_sentences(self, sentences):\n        # print(\"sentences------->\")\n        for sentence in sentences:\n            print(sentence)\n        print(\"<------sentences\\n\\n\")\n\n    def add_raw_feature(self, feature):\n        pos_tagged_features = self.pos_tag_tokenized_sentences(self.tokenize_sentence(feature))\n        for element in pos_tagged_features:\n            self.raw_feature_list.append(element)\n\n    def add_feature(self, feature):\n        self.feature_list.append(feature.strip())\n\n    # def evaluate(self):\n    #     document = Documents()\n    #     truth_set = document.load_app_features_by_app_name(self.app_name)\n    #     test_set = self.feature_list\n\n    #     evalutation = Evaluation()\n    #     evalutation.evaluate(truth_set, test_set)\n\n    def print_final_features_pos_tags(self):\n        print(\"------------------\")\n        for feature in self.feature_list:\n            pos_tagged_features = self.pos_tag_tokenized_sentences(self.tokenize_sentence(feature))\n            for element in pos_tagged_features:\n                print(element)\n        print(\"------------------\")\n\n    def extract_from_review(self, review):\n        self.feature_list = []\n        self.raw_feature_list = []\n        self.review = review.lower()\n\n        \"\"\"The method that acts like a facade and controls all method calls\"\"\"\n        sentences = self.extract_sentences(self.review)\n        sentences = self.remove_text_in_brackets(sentences)\n        sentences = self.filter_sentences(sentences)\n        # sentences = self.remove_subordinate_clauses(sentences)\n        sentences = self.expand_all_contractions(sentences)\n        sentences = self.remove_symbols_from_review(sentences)\n\n        tokens_per_sentence = self.tokenize_sentence(sentences)\n        tokens_per_sentence = self.pos_tag_tokenized_sentences(tokens_per_sentence)\n        tokens_per_sentence = self.lemmatize_sentences(tokens_per_sentence)\n        tokens_per_sentence = self.remove_stopwords(tokens_per_sentence, True)\n\n        pos_tags_per_sentence = self.pos_tag_tokenized_sentences(tokens_per_sentence)\n        pos_tags_per_sentence = self.extract_features_from_complex_lists(pos_tags_per_sentence)\n        if self.debug:\n            print(\"features before chunking ------------->\")\n            self.print_final_features()\n            print(\"<------------- features before chunking\\n\")\n\n        sentence_chunks = self.chunk_review_sentences(pos_tags_per_sentence)\n\n        self.post_filter_app_features_from_reviews()\n        # self.print_final_features()\n        # self.print_final_features_pos_tags()\n        # self.write_features_to_file()\n        # self.evaluate()\n\n        return self.feature_list\n\n    def extract_from_description(self, description):\n        self.feature_list = []\n        self.raw_feature_list = []\n        self.description = description\n\n        
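# Hedged usage sketch (hypothetical call and output, not from the original source):\n        #   SAFE().extract_from_description(\"Send and receive attachments\")\n        #   would be expected to yield features such as 'send attachments' and\n        #   'receive attachments' via the case-5 list extraction above.\n        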
\"\"\"The method that acts like a facade and controls all method calls\"\"\"\n sentences = self.extract_sentences(self.description)\n sentences = self.remove_text_in_brackets(sentences)\n sentences = self.filter_sentences(sentences)\n sentences = self.remove_symbols(sentences)\n sentences = self.remove_subordinate_clauses(sentences)\n\n if self.debug:\n self.debug_sentences(sentences)\n tokens_per_sentence = self.tokenize_sentence(sentences)\n tokens_per_sentence = self.remove_stopwords(tokens_per_sentence)\n\n pos_tags_per_sentence = self.pos_tag_tokenized_sentences(tokens_per_sentence)\n pos_tags_per_sentence = self.extract_features_from_complex_lists(pos_tags_per_sentence)\n if self.debug:\n print(\"features before chunking ------------->\")\n self.print_final_features()\n print(\"<------------- features before chunking\\n\")\n\n sentence_chunks = self.chunk_sentences(pos_tags_per_sentence)\n\n self.post_filter_app_features()\n self.print_final_features()\n # self.print_final_features_pos_tags()\n # self.write_features_to_file()\n # self.evaluate()\n\n return self.feature_list\n\n\nif __name__ == '__main__':\n ############\n # Extract from a single descriptions\n ############\n # example_description = \"\"\"\n\n # Kik has a brand new look! It's more fun and easy to use, so you can spend less time hunting for photos and GIFs and more time chatting with friends.\n\n # Quickly find friends, start groups and discover bots with the \"+\" menu\n # It's easier to send your favorite photos, GIFs, stickers and smileys - they're under the text field for easy access\n # When you have a lot to say and send several messages in a row, chat bubbles will group together\n # Looking for faded D? We made the S, D and R colors darker!\n \n # \"\"\"\n \n example_description = \"\"\"Kik has a brand new look! It's more fun and easy to use, so you can spend less time hunting for photos and GIFs and more time chatting with friends.\nQuickly find friends, start groups and discover bots with the \"+\" menu\nIt's easier to send your favorite photos, GIFs, stickers and smileys - they're under the text field for easy access\nWhen you have a lot to say and send several messages in a row, chat bubbles will group together\nLooking for faded D? We made the S, D and R colors darker! \"\"\"\n\n print(example_description)\n # example_review = \"\"\"\n # 5 Cannot write more than two captions While sending images.....??\n # 3 sometimes very slow, screen offs quickly, Pl help improve this many thanks.\n # 3 unable to delete one call instead all list of call you made..\n # 5 Please make an ipad version of whatsapp.\n # 5 This is a good app but I think people should have snapchat because snapchat has cute filters and you could call and video call and put stuff in your story and you can add friends on snapchat and you can also text your friends in snapchat and play games.\n # 5 great. I use it loads and find it really helpful. I pretty much made this review to ask this question. my friends' group chats always have different colours for people's names but mine has no colours. Why is this?\n # 5 Still an awesome app. Sometimes i wish WhatsApp have on and off button, coz i just want to ignore everybody for a moment. 
??\n # 5 all r very nice but we needed some more clarity in pic while video calling\n # 1 plzz add this feature then show the profile picture only one person like in a status\n # 5 best way to connect to my friends locally and internationally and share clear videos and pictures!!\n # 1 When we will forwarding the message limitations of 5 people, but we need Unlimited people/group. After the complain it's not changed.\n # 5 I use it a lot, really great app.. But I need to know.. Would the blocked contacts be informed or notified if I change my phone number?\n # 4 Great messenger. But still no full darkmode.\n # 3 sometimes message notification not ringing\n # 1 cant share video\n # 3 it's very interesting yo share your sadness in status.\n # 5 Whatsapp, can you make *message editing* option?\n # 3 please add default status downloader update it quick for further more subscribers\n # 3 it OK but the call cut off in the middle of the conversation ...need. to be more clear\n # 5 So convenient specially with video call\n # 1 We need to be able to use whatsapp without data or WiFi\n # 4 incoming call doesnt show up on lockscreen\n # 3 video calls not clear ??\n # 5 just a little suggestion, can whatsapp make an option where you can schedule a mute for certain group. instead of setting the group mute for 8 hours everyday, it already been scheduled to mute from this hour to this hour on this certain day. i think this is very helpful to mute a group during working days.\n # 5 A nice improvement... Better than the last.\n # 4 i can't see if what time the person who is online or not..\n # 3 This is garbage, I prefer the old version\n # 4 It's nice. But seeing people's comments on other people's statuses would be more fun.\n # 1 update version is not working properly. mia1 ph. do something as early as possible.\n # 4 Great app, but dislikes updates\n # 5 I love whatsapp!! 
So useful...to write with...call or send videos and photos and links and share and copy paste!!\n # 5 Its good cause its reception is always good ,sound quality during calls wheather its video or voice or overseas calll is always on perfection, Keep up the good work\n # 4 picture-in-picture player doesn't work anymore and sends me to YouTube anyway\n # \"\"\"\n example_review = \"\"\"\n So convenient specially with video call\"\"\"\n\n feature_extractor = SAFE()\n # x= feature_extractor.extract_from_description(example_description)\n # y= feature_extractor.extract_from_review(example_description)\n # print(\"------\",x)\n # print(\"++++++\",y)\n\n # whatsnew_f= open('..\\\\kik_whatsnew.txt', encoding=\"utf-8\", errors=\"ignore\")\n # whatsnew_text = whatsnew_f.read()\n # whatsnew_f.close()\n # # print(str(whatsnew_text))\n # set_A=feature_extractor.extract_from_description(whatsnew_text)\n # set_B=feature_extractor.extract_from_review(whatsnew_text)\n # set_C=set_A.union(set_B)\n \n # print(\"Union++++\",set_C)\n # print(\"R++++\",feature_extractor.extract_from_review((whatsnew_text)))", "sub_path": "Functional Feature Extraction/FE_SAFE_On_dataset.py", "file_name": "FE_SAFE_On_dataset.py", "file_ext": "py", "file_size_in_byte": 43272, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "nltk.tag.stanford.StanfordPOSTagger", "line_number": 23, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 49, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 49, "usage_type": "name"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 50, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 50, "usage_type": "name"}, {"api_name": "nltk.WordNetLemmatizer", "line_number": 70, "usage_type": "call"}, {"api_name": "nltk.RegexpParser", "line_number": 71, "usage_type": "call"}, {"api_name": "nltk.RegexpParser", "line_number": 72, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 85, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet.ADJ", "line_number": 96, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 96, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.VERB", "line_number": 98, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 98, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.NOUN", "line_number": 100, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 100, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.ADV", "line_number": 102, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 102, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.NOUN", "line_number": 104, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 104, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 116, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 119, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 127, "usage_type": "call"}, {"api_name": "re.match", "line_number": 131, "usage_type": "call"}, {"api_name": "re.match", "line_number": 132, "usage_type": "call"}, {"api_name": "re.match", "line_number": 141, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 157, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 158, "usage_type": "call"}, {"api_name": "re.search", "line_number": 198, "usage_type": "call"}, 
{"api_name": "nltk.word_tokenize", "line_number": 208, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 212, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 373, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 373, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 431, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 455, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 456, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 663, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 663, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 697, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 697, "usage_type": "attribute"}]} +{"seq_id": "629650201", "text": "\"\"\"\nCopyright (c) 2015, 2019 Red Hat, Inc\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. See the LICENSE file for details.\n\"\"\"\nimport json\nimport os\n\nfrom osbs.exceptions import OsbsResponseException\n\nfrom atomic_reactor.plugins.pre_reactor_config import get_openshift_session, get_koji\nfrom atomic_reactor.plugins.pre_fetch_sources import PLUGIN_FETCH_SOURCES_KEY\nfrom atomic_reactor.constants import (PLUGIN_KOJI_UPLOAD_PLUGIN_KEY,\n PLUGIN_VERIFY_MEDIA_KEY,\n PLUGIN_RESOLVE_REMOTE_SOURCE,\n SCRATCH_FROM)\nfrom atomic_reactor.plugin import ExitPlugin\nfrom atomic_reactor.util import get_build_json\n\n\nclass StoreMetadataInOSv3Plugin(ExitPlugin):\n key = \"store_metadata_in_osv3\"\n is_allowed_to_fail = False\n\n def __init__(self, tasker, workflow, url=None, verify_ssl=True, use_auth=True):\n \"\"\"\n constructor\n\n :param tasker: ContainerTasker instance\n :param workflow: DockerBuildWorkflow instance\n :param url: str, URL to OSv3 instance\n :param use_auth: bool, initiate authentication with openshift?\n \"\"\"\n # call parent constructor\n super(StoreMetadataInOSv3Plugin, self).__init__(tasker, workflow)\n self.openshift_fallback = {\n 'url': url,\n 'insecure': not verify_ssl,\n 'auth': {'enable': use_auth}\n }\n self.source_build = PLUGIN_FETCH_SOURCES_KEY in self.workflow.prebuild_results\n\n def get_result(self, result):\n if isinstance(result, Exception):\n result = ''\n\n return result\n\n def get_pre_result(self, key):\n return self.get_result(self.workflow.prebuild_results.get(key, ''))\n\n def get_post_result(self, key):\n return self.get_result(self.workflow.postbuild_results.get(key, ''))\n\n def get_exit_result(self, key):\n return self.get_result(self.workflow.exit_results.get(key, ''))\n\n def get_config_map(self):\n annotations = self.get_post_result(PLUGIN_KOJI_UPLOAD_PLUGIN_KEY)\n if not annotations:\n return {}\n\n return annotations\n\n def get_digests(self):\n \"\"\"\n Returns a map of repositories to digests\n \"\"\"\n\n digests = {} # repository -> digest\n for registry in self.workflow.push_conf.docker_registries:\n for image in self.workflow.tag_conf.images:\n image_str = image.to_str()\n if image_str in registry.digests:\n digest = registry.digests[image_str]\n digests[image.to_str(registry=False)] = digest\n\n return digests\n\n def _get_registries(self):\n \"\"\"\n Return a list of registries that this build updated\n \"\"\"\n return self.workflow.push_conf.all_registries\n\n def get_repositories(self):\n # usually repositories formed from NVR labels\n # these should be used for pulling and layering\n 
primary_repositories = []\n for registry in self._get_registries():\n for image in self.workflow.tag_conf.primary_images:\n registry_image = image.copy()\n registry_image.registry = registry.uri\n primary_repositories.append(registry_image.to_str())\n\n # unique unpredictable repositories\n unique_repositories = []\n for registry in self._get_registries():\n for image in self.workflow.tag_conf.unique_images:\n registry_image = image.copy()\n registry_image.registry = registry.uri\n unique_repositories.append(registry_image.to_str())\n\n # floating repositories\n # these should be used for pulling and layering\n floating_repositories = []\n for registry in self._get_registries():\n for image in self.workflow.tag_conf.floating_images:\n registry_image = image.copy()\n registry_image.registry = registry.uri\n floating_repositories.append(registry_image.to_str())\n return {\n \"primary\": primary_repositories,\n \"unique\": unique_repositories,\n \"floating\": floating_repositories,\n }\n\n def get_pullspecs(self, digests):\n # v2 registry digests\n pullspecs = []\n for registry in self._get_registries():\n for image in self.workflow.tag_conf.images:\n image_str = image.to_str()\n if image_str in digests:\n digest = digests[image_str]\n for digest_version in digest.content_type:\n if digest_version not in digest:\n continue\n pullspecs.append({\n \"registry\": registry.uri,\n \"repository\": image.to_str(registry=False, tag=False),\n \"tag\": image.tag,\n \"digest\": digest[digest_version],\n \"version\": digest_version\n })\n\n return pullspecs\n\n def get_plugin_metadata(self):\n return {\n \"errors\": self.workflow.plugins_errors,\n \"timestamps\": self.workflow.plugins_timestamps,\n \"durations\": self.workflow.plugins_durations,\n }\n\n def get_filesystem_metadata(self):\n data = {}\n try:\n data = self.workflow.fs_watcher.get_usage_data()\n self.log.debug(\"filesystem metadata: %s\", data)\n except Exception:\n self.log.exception(\"Error getting filesystem stats\")\n\n return data\n\n def _update_labels(self, labels, updates):\n if updates:\n updates = {key: str(value) for key, value in updates.items()}\n labels.update(updates)\n\n def make_labels(self):\n labels = {}\n self._update_labels(labels, self.workflow.labels)\n self._update_labels(labels, self.workflow.build_result.labels)\n\n if 'sources_for_koji_build_id' in labels:\n labels['sources_for_koji_build_id'] = str(labels['sources_for_koji_build_id'])\n\n return labels\n\n def set_koji_task_annotations_whitelist(self, annotations):\n \"\"\"Whitelist annotations to be included in koji task output\n\n Allow annotations whose names are listed in task_annotations_whitelist\n koji's configuration to be included in the build_annotations.json file,\n which will be attached in the koji task output.\n \"\"\"\n koji_config = get_koji(self.workflow)\n whitelist = koji_config.get('task_annotations_whitelist')\n if whitelist:\n annotations['koji_task_annotations_whitelist'] = json.dumps(whitelist)\n\n def _update_annotations(self, annotations, updates):\n if updates:\n updates = {key: json.dumps(value) for key, value in updates.items()}\n annotations.update(updates)\n\n def apply_build_result_annotations(self, annotations):\n self._update_annotations(annotations, self.workflow.build_result.annotations)\n\n def apply_plugin_annotations(self, annotations):\n self._update_annotations(annotations, self.workflow.annotations)\n\n def apply_remote_source_annotations(self, annotations):\n try:\n rs_annotations = 
self.get_pre_result(PLUGIN_RESOLVE_REMOTE_SOURCE)['annotations']\n except (TypeError, KeyError):\n return\n annotations.update(rs_annotations)\n\n def run(self):\n metadata = get_build_json().get(\"metadata\", {})\n\n try:\n build_id = metadata[\"name\"]\n except KeyError:\n self.log.error(\"malformed build json\")\n return\n self.log.info(\"build id = %s\", build_id)\n osbs = get_openshift_session(self.workflow, self.openshift_fallback)\n\n if not self.source_build:\n try:\n commit_id = self.workflow.source.commit_id\n except AttributeError:\n commit_id = \"\"\n\n # for early flatpak failure before it creates Dockerfile and creates dockerfile_images\n if self.workflow.builder.dockerfile_images is None:\n base_image_name = \"\"\n base_image_id = \"\"\n parent_images_strings = {}\n else:\n base_image = self.workflow.builder.dockerfile_images.original_base_image\n if (base_image is not None and\n not self.workflow.builder.dockerfile_images.base_from_scratch):\n base_image_name = base_image\n try:\n base_image_id = self.workflow.builder.base_image_inspect['Id']\n except KeyError:\n base_image_id = \"\"\n else:\n base_image_name = \"\"\n base_image_id = \"\"\n\n parent_images_strings = self.workflow.builder.parent_images_to_str()\n if self.workflow.builder.dockerfile_images.base_from_scratch:\n parent_images_strings[SCRATCH_FROM] = SCRATCH_FROM\n\n try:\n with open(self.workflow.builder.df_path) as f:\n dockerfile_contents = f.read()\n except AttributeError:\n dockerfile_contents = \"\"\n\n annotations = {\n 'repositories': json.dumps(self.get_repositories()),\n 'digests': json.dumps(self.get_pullspecs(self.get_digests())),\n 'plugins-metadata': json.dumps(self.get_plugin_metadata()),\n 'filesystem': json.dumps(self.get_filesystem_metadata()),\n }\n\n if self.source_build:\n annotations['image-id'] = ''\n if self.workflow.koji_source_manifest:\n annotations['image-id'] = self.workflow.koji_source_manifest['config']['digest']\n else:\n annotations['dockerfile'] = dockerfile_contents\n annotations['commit_id'] = commit_id\n annotations['base-image-id'] = base_image_id\n annotations['base-image-name'] = base_image_name\n annotations['image-id'] = self.workflow.builder.image_id or ''\n annotations['parent_images'] = json.dumps(parent_images_strings)\n\n media_types = []\n\n media_results = self.workflow.exit_results.get(PLUGIN_VERIFY_MEDIA_KEY)\n if isinstance(media_results, Exception):\n media_results = None\n\n if media_results:\n media_types += media_results\n\n if media_types:\n annotations['media-types'] = json.dumps(sorted(list(set(media_types))))\n\n tar_path = tar_size = tar_md5sum = tar_sha256sum = None\n if len(self.workflow.exported_image_sequence) > 0:\n tar_path = self.workflow.exported_image_sequence[-1].get(\"path\")\n tar_size = self.workflow.exported_image_sequence[-1].get(\"size\")\n tar_md5sum = self.workflow.exported_image_sequence[-1].get(\"md5sum\")\n tar_sha256sum = self.workflow.exported_image_sequence[-1].get(\"sha256sum\")\n # looks like that openshift can't handle value being None (null in json)\n if tar_size is not None and tar_md5sum is not None and tar_sha256sum is not None and \\\n tar_path is not None:\n annotations[\"tar_metadata\"] = json.dumps({\n \"size\": tar_size,\n \"md5sum\": tar_md5sum,\n \"sha256sum\": tar_sha256sum,\n \"filename\": os.path.basename(tar_path),\n })\n\n self.apply_remote_source_annotations(annotations)\n\n annotations.update(self.get_config_map())\n\n self.apply_plugin_annotations(annotations)\n 
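# note: the build-result annotations applied next override the plugin annotations above on duplicate keys (plain dict.update semantics)\n 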
self.apply_build_result_annotations(annotations)\n self.set_koji_task_annotations_whitelist(annotations)\n\n try:\n osbs.update_annotations_on_build(build_id, annotations)\n except OsbsResponseException:\n self.log.debug(\"annotations: %r\", annotations)\n raise\n\n labels = self.make_labels()\n if labels:\n try:\n osbs.update_labels_on_build(build_id, labels)\n except OsbsResponseException:\n self.log.debug(\"labels: %r\", labels)\n raise\n\n return {\"annotations\": annotations, \"labels\": labels}\n", "sub_path": "atomic_reactor/plugins/exit_store_metadata_in_osv3.py", "file_name": "exit_store_metadata_in_osv3.py", "file_ext": "py", "file_size_in_byte": 12364, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "atomic_reactor.plugin.ExitPlugin", "line_number": 23, "usage_type": "name"}, {"api_name": "atomic_reactor.plugins.pre_fetch_sources.PLUGIN_FETCH_SOURCES_KEY", "line_number": 43, "usage_type": "name"}, {"api_name": "atomic_reactor.constants.PLUGIN_KOJI_UPLOAD_PLUGIN_KEY", "line_number": 61, "usage_type": "argument"}, {"api_name": "atomic_reactor.plugins.pre_reactor_config.get_koji", "line_number": 180, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 183, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 187, "usage_type": "call"}, {"api_name": "atomic_reactor.constants.PLUGIN_RESOLVE_REMOTE_SOURCE", "line_number": 198, "usage_type": "argument"}, {"api_name": "atomic_reactor.util.get_build_json", "line_number": 204, "usage_type": "call"}, {"api_name": "osbs.exceptions", "line_number": 212, "usage_type": "name"}, {"api_name": "atomic_reactor.plugins.pre_reactor_config.get_openshift_session", "line_number": 212, "usage_type": "call"}, {"api_name": "atomic_reactor.constants.SCRATCH_FROM", "line_number": 240, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 249, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 250, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 251, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 252, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 265, "usage_type": "call"}, {"api_name": "atomic_reactor.constants.PLUGIN_VERIFY_MEDIA_KEY", "line_number": 269, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 277, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path", "line_number": 292, "usage_type": "attribute"}, {"api_name": "osbs.exceptions.update_annotations_on_build", "line_number": 304, "usage_type": "call"}, {"api_name": "osbs.exceptions", "line_number": 304, "usage_type": "name"}, {"api_name": "osbs.exceptions.OsbsResponseException", "line_number": 305, "usage_type": "name"}, {"api_name": "osbs.exceptions.update_labels_on_build", "line_number": 312, "usage_type": "call"}, {"api_name": "osbs.exceptions", "line_number": 312, "usage_type": "name"}, {"api_name": "osbs.exceptions.OsbsResponseException", "line_number": 313, "usage_type": "name"}]} +{"seq_id": "309788984", "text": "from django.contrib.auth import authenticate, login,logout\nfrom core.backend.authentication.forms import LoginForm, NewUserCreationForm\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\n\n# Create your views 
here.\ndef register(request):\n if request.user.is_authenticated:\n return redirect('/')\n else:\n if request.method == \"POST\":\n form = NewUserCreationForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n form.save()\n messages.info(\n request, f\"{username} Your Account Created Successfully!\")\n\n url = request.GET.get(\"next\",None)\n return redirect(f'/login/?next={url}') if url else redirect(\"login\")\n \n if request.method != \"POST\": form = NewUserCreationForm() # keep the bound form after an invalid POST so validation errors render\n context = {\n \"form\":form\n }\n return render(request,\"register.html\",context)\n\ndef mylogin(request):\n if request.user.is_authenticated:\n return redirect('/')\n else:\n form = LoginForm(request.POST or None)\n msg = None\n if request.method == \"POST\":\n if form.is_valid():\n email = form.cleaned_data.get(\"email\")\n username = None\n try:\n username = get_object_or_404(User,email=email).username\n except Exception:\n username = None\n password = form.cleaned_data.get(\"password\")\n url = request.GET.get(\"next\",None)\n print(\"url: \",url)\n\n user = authenticate(request,username=username,password=password)\n\n if user is not None:\n login(request,user)\n\n return redirect(url) if url else redirect(\"quiz-list\")\n else:\n msg = 'Invalid credentials' \n else:\n msg = 'Error validating the form'\n context = {\n \"form\":form,\n \"msg\":msg\n }\n return render(request,\"login.html\",context)\n\ndef mylogout(request):\n logout(request)\n return redirect(\"login\")", "sub_path": "core/backend/authentication/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2241, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 11, "usage_type": "call"}, {"api_name": "core.backend.authentication.forms.NewUserCreationForm", "line_number": 14, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 22, "usage_type": "call"}, {"api_name": "core.backend.authentication.forms.NewUserCreationForm", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 32, "usage_type": "call"}, {"api_name": "core.backend.authentication.forms.LoginForm", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 41, "usage_type": "argument"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 53, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "300688752", "text": "# pylint: disable=missing-docstring\nfrom .base import BaseProcessorTestCase\nfrom .utils import PreparedData\n\n\nclass VariantCallingTestCase(BaseProcessorTestCase, PreparedData):\n def test_variant_calling(self):\n genome = self.prepare_genome()\n reads = 
self.prepare_reads('vc_reads.fastq')\n\n inputs = {'genome': genome.pk, 'reads': reads.pk, 'reporting': {'rep_mode': \"def\"}}\n aligned_reads = self.run_processor('alignment:bowtie-2-2-3_trim', inputs)\n self.assertFiles(aligned_reads, 'stats', 'VC_bt2_mem_reads_report.txt')\n\n # samtools variant calling test\n inputs = {'genome': genome.pk, 'mapping': aligned_reads.pk}\n samtools_variants = self.run_processor('vc-samtools', inputs)\n self.assertFiles(samtools_variants, 'vcf', 'VC_reads_align_samtoolscalls.vcf')\n\n # GATK variant calling test\n inputs = {\n 'genome': genome.pk,\n 'mapping': aligned_reads.pk,\n 'known_sites': samtools_variants.pk,\n 'known_indels': [samtools_variants.pk],\n 'reads_info': {\n 'ID': \"x\",\n 'SM': \"x\",\n 'PL': \"Illumina\",\n 'LB': \"x\",\n 'CN': \"def\",\n 'DT': \"2014-08-05\"},\n 'Varc_param': {'stand_emit_conf': 10, 'stand_call_conf': 30}}\n self.run_processor('vc-gatk', inputs)\n # NOTE: output can not be tested\n\n # GATK joint variant calling test\n inputs = {\n 'genome': genome.pk,\n 'mapping': [aligned_reads.pk],\n 'reads_info': {\n 'PL': \"Illumina\",\n 'LB': \"x\",\n 'CN': \"def\",\n 'DT': \"2014-08-05\"},\n 'Varc_param': {'stand_emit_conf': 10, 'stand_call_conf': 30}}\n self.run_processor('vc-gatk-joint', inputs)\n # NOTE: output can not be tested\n", "sub_path": "tests/test_variant_calling.py", "file_name": "test_variant_calling.py", "file_ext": "py", "file_size_in_byte": 1887, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "base.BaseProcessorTestCase", "line_number": 6, "usage_type": "name"}, {"api_name": "utils.PreparedData", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "413322939", "text": "# -*- coding: utf-8 -*-\n# see LICENSE.rst\n\n# ----------------------------------------------------------------------------\n#\n# TITLE : Create MW Potential Scripts\n# AUTHOR : Jo Bovy and Nathaniel Starkman\n# PROJECT : Pal 5 update MW potential constraints\n#\n# ----------------------------------------------------------------------------\n\n\"\"\"Create MW Potential Scripts.\n\nRoutine Listings\n----------------\nmake_parser\nmain\nrun_fit_mwpot14\nrun_fit_mwpot_dblexp\nrun_fit_potential_pal5\nrun_fit_potential_gd1\nrun_fit_potential_combo_pal5_gd1\n\n\"\"\"\n\n__author__ = [\"Nathaniel Starkman\", \"Jo Bovy\"]\n__maintainer__ = \"Nathaniel Starkman\"\n\n__all__ = [\n \"make_parser\",\n \"main\",\n \"run_fit_mwpot14\",\n \"run_fit_mwpot_dblexp\",\n \"run_fit_potential_pal5\",\n \"run_fit_potential_gd1\",\n \"run_fit_potential_combo_pal5_gd1\",\n]\n\n\n##############################################################################\n# IMPORTS\n\n# GENERAL\n\nimport os\nimport argparse\nimport copy\nfrom typing import Optional\n\n\n# PROJECT-SPECIFIC\n\nfrom .fit_mwpot14_script import main as run_fit_mwpot14\nfrom .fit_mwpot_dblexp_script import main as run_fit_mwpot_dblexp\nfrom .fit_potential_pal5_script import main as run_fit_potential_pal5\nfrom .fit_potential_gd1_script import main as run_fit_potential_gd1\nfrom .fit_potential_combo_pal5_gd1_script import (\n main as run_fit_potential_combo_pal5_gd1,\n)\nfrom . 
import script_util as su\n\n\n##############################################################################\n# COMMAND LINE\n##############################################################################\n\n\ndef make_parser(inheritable=False):\n \"\"\"Make Parser.\n\n Parameters\n ----------\n inheritable: bool\n whether the parser can be inherited from (default False).\n if True, sets ``add_help=False`` and ``conflict_handler='resolve'``\n\n Returns\n -------\n parser: ArgumentParser\n\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Run full create_MW_potential_2014 set of scripts.\",\n add_help=not inheritable,\n conflict_handler=\"resolve\" if inheritable else \"error\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n default=\"output/\",\n type=str,\n help=\"output save folder\",\n dest=\"opath\",\n )\n parser.add_argument(\n \"-f\",\n \"--figure\",\n default=\"figures/\",\n type=str,\n help=\"figure save folder\",\n dest=\"fpath\",\n )\n\n return parser\n\n\n# /defs\n\n\n# -------------------------------------------------------------------\n\n\ndef main(args: Optional[list] = None, opts: Optional[argparse.Namespace] = None):\n \"\"\"Run the Full create_MW_potential_2014 set of scripts.\n\n Parameters\n ----------\n args : list, optional\n an optional single argument that holds the sys.argv list,\n except for the script name (e.g., argv[1:])\n opts : Namespace, optional\n already parsed options.\n Will be ignored if args is not None.\n\n \"\"\"\n if opts is None or args is not None:\n # parse the command line; an explicit ``args`` list takes precedence\n # over any pre-parsed ``opts``, matching the docstring above\n parser = make_parser()\n opts = parser.parse_args(args)\n\n # ---------------------------------\n\n if not os.path.exists(opts.opath):\n os.makedirs(opts.opath)\n\n if not os.path.exists(opts.fpath):\n os.makedirs(opts.fpath)\n\n # ---------------------------------\n\n # 1) fit_mwpot14\n mw14opts = copy.copy(opts)\n mw14opts.fpath += \"mwpot14/\"\n run_fit_mwpot14(opts=mw14opts)\n\n # 2) fit_mwpot-dblexp\n dblopts = copy.copy(opts)\n dblopts.fpath += \"mwpot_dblexp/\"\n run_fit_mwpot_dblexp(opts=dblopts)\n\n # 3) Pal5\n pal5opts = copy.copy(opts)\n pal5opts.fpath += \"pal5/\"\n run_fit_potential_pal5(opts=pal5opts)\n\n # 4) GD1\n gd1opts = copy.copy(opts)\n gd1opts.fpath += \"gd1/\"\n run_fit_potential_gd1(opts=gd1opts)\n\n # 5) Combo Pal5 and GD1\n comboopts = copy.copy(opts)\n comboopts.fpath += \"combo_pal5_gd1/\"\n run_fit_potential_combo_pal5_gd1(opts=comboopts)\n\n # 6) plot force field\n su.plotForceField(\"figures/mwhalo-shapeforcefield.pdf\")\n\n return\n\n\n# /def\n\n\n##############################################################################\n# END\n", "sub_path": "pal5_constrain_mwhalo_shape/scripts/create_MW_potential_2014/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 4217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 82, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 113, "usage_type": "name"}, {"api_name": "argparse.Namespace", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, 
{"api_name": "os.makedirs", "line_number": 138, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 143, "usage_type": "call"}, {"api_name": "fit_mwpot14_script.main", "line_number": 145, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 148, "usage_type": "call"}, {"api_name": "fit_mwpot_dblexp_script.main", "line_number": 150, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 153, "usage_type": "call"}, {"api_name": "fit_potential_pal5_script.main", "line_number": 155, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 158, "usage_type": "call"}, {"api_name": "fit_potential_gd1_script.main", "line_number": 160, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 163, "usage_type": "call"}, {"api_name": "fit_potential_combo_pal5_gd1_script.main", "line_number": 165, "usage_type": "call"}]} +{"seq_id": "82771034", "text": "from flask import current_app, render_template\nfrom maintain_frontend.exceptions import ApplicationError\nfrom maintain_frontend.dependencies.search_api.local_land_charge_service import LocalLandChargeService\nfrom maintain_frontend.view_modify_land_charge.view_land_charge import validate_charge_id\nfrom maintain_frontend.decorators import requires_permission\nfrom maintain_frontend.constants.permissions import Permissions\nfrom maintain_frontend.models import LocalLandChargeHistoryItem, LocalLandChargeItem\nfrom maintain_frontend.constants.lon_defaults import LonDefaults\nfrom maintain_frontend.services.date_formatter import DateFormatter\n\n\ndef register_routes(bp):\n bp.add_url_rule('//history', view_func=history, methods=['GET'])\n bp.add_app_template_filter(history_change_overview_format, 'history_change_overview_format')\n\n\n@requires_permission([Permissions.retrieve_llc])\ndef history(charge_id):\n\n llc_service = LocalLandChargeService(current_app.config)\n validate_charge_id(charge_id)\n\n history_response = llc_service.get_history_for_charge(charge_id)\n search_response = llc_service.get_by_charge_number(charge_id)\n\n if history_response.status_code == 404 or search_response.status_code == 404:\n current_app.logger.warning(\"Charge not found for charge_id='{}' - Returning not found\".format(charge_id))\n raise ApplicationError(404)\n\n if history_response.status_code == 500 or search_response.status_code == 500:\n current_app.logger.error(\"Server error occurred when getting details for charge_id''{}\".format(charge_id))\n raise ApplicationError(500)\n\n history_response.raise_for_status()\n search_response.raise_for_status()\n\n history_items = list(reversed(LocalLandChargeHistoryItem.from_json(history_response.json())))\n\n charge_data = search_response.json()[0]['item']\n land_charge = LocalLandChargeItem.from_json(charge_data)\n\n return render_template('view_charge_history.html', charge_id=charge_id,\n history=history_items, local_land_charge=land_charge,\n format_date_bst=DateFormatter.format_date_bst)\n\n\n# Custom Filters\n# @view_llc_bp.app_template_filter('history_change_overview_format')\ndef history_change_overview_format(local_land_charge_history, charge_type=None):\n item_changes = local_land_charge_history.item_changes\n cancelled = local_land_charge_history.cancelled\n\n # Ignore author changes\n if item_changes:\n item_changes.pop('author', None)\n\n if not item_changes:\n return \"No changes made\"\n\n if cancelled:\n return \"Charge is cancelled\"\n\n display_names = {\n \"applicant-name\": \"Name: person applying for the light obstruction notice\",\n \"applicant-address\": 
\"Address: person applying for the light obstruction notice\",\n \"charge-creation-date\": \"Creation date\",\n \"charge-geographic-description\": \"Location\",\n \"documents-filed\": \"Legal Document(s)\",\n \"expiry-date\": \"Expiry date\",\n \"further-information-location\": \"Source information\",\n \"further-information-reference\": \"Authority reference\",\n \"geometry\": \"Extent\",\n \"instrument\": \"Source\",\n \"statutory-provision\": \"Law\",\n \"structure-position-and-dimension\": \"Height and extent: Planned development\",\n \"servient-land-interest-description\": \"Interest in the land\",\n \"tribunal-temporary-certificate-date\": \"Temporary certificate date\",\n \"tribunal-temporary-certificate-expiry-date\": \"Temporary expiry date\",\n \"tribunal-definitive-certificate-date\": \"Definitive certificate date\",\n \"supplementary-information\": \"Description\",\n \"land-compensation-paid\": \"Advance payment\",\n \"land-compensation-amount-type\": \"Agreed or estimated\",\n \"amount-of-compensation\": \"Total compensation\"\n }\n\n if charge_type == LonDefaults.charge_type:\n display_names[\"charge-geographic-description\"] = \"Address - Dominant building\"\n\n changes_overview = []\n\n for key in item_changes:\n if key in display_names:\n changes_overview.append(display_names[key])\n else:\n # Remove dash and capitalize if required\n change = key.replace(\"-\", \" \").title()\n changes_overview.append(change)\n\n changes_overview = sorted(changes_overview)\n\n return ',
'.join(changes_overview)\n", "sub_path": "maintain_frontend/view_modify_land_charge/view_land_charge_history.py", "file_name": "view_land_charge_history.py", "file_ext": "py", "file_size_in_byte": 4397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "maintain_frontend.dependencies.search_api.local_land_charge_service.LocalLandChargeService", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 20, "usage_type": "name"}, {"api_name": "maintain_frontend.view_modify_land_charge.view_land_charge.validate_charge_id", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.current_app.logger.warning", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 27, "usage_type": "name"}, {"api_name": "maintain_frontend.exceptions.ApplicationError", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.current_app.logger.error", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 31, "usage_type": "name"}, {"api_name": "maintain_frontend.exceptions.ApplicationError", "line_number": 32, "usage_type": "call"}, {"api_name": "maintain_frontend.models.LocalLandChargeHistoryItem.from_json", "line_number": 37, "usage_type": "call"}, {"api_name": "maintain_frontend.models.LocalLandChargeHistoryItem", "line_number": 37, "usage_type": "name"}, {"api_name": "maintain_frontend.models.LocalLandChargeItem.from_json", "line_number": 40, "usage_type": "call"}, {"api_name": "maintain_frontend.models.LocalLandChargeItem", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 42, "usage_type": "call"}, {"api_name": "maintain_frontend.services.date_formatter.DateFormatter.format_date_bst", "line_number": 44, "usage_type": "attribute"}, {"api_name": "maintain_frontend.services.date_formatter.DateFormatter", "line_number": 44, "usage_type": "name"}, {"api_name": "maintain_frontend.decorators.requires_permission", "line_number": 17, "usage_type": "call"}, {"api_name": "maintain_frontend.constants.permissions.Permissions.retrieve_llc", "line_number": 17, "usage_type": "attribute"}, {"api_name": "maintain_frontend.constants.permissions.Permissions", "line_number": 17, "usage_type": "name"}, {"api_name": "maintain_frontend.constants.lon_defaults.LonDefaults.charge_type", "line_number": 86, "usage_type": "attribute"}, {"api_name": "maintain_frontend.constants.lon_defaults.LonDefaults", "line_number": 86, "usage_type": "name"}]} +{"seq_id": "411806755", "text": "# -*- coding: utf-8 -*-\n# @Time : 2021/7/7 0:02\n# @Author : wkRonin\n# @File :test_mini_program.py\nfrom time import sleep\n\nfrom appium import webdriver\nfrom appium.webdriver.common.mobileby import MobileBy\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass TestWebview:\n def setup(self):\n des_caps = {\n 'platformName': 'android',\n 'platformVersion': '8.0',\n # 'browserName': '',\n 'noReset': 'true',\n 'newCommandTimeout': '300',\n 'showChromedriverLog': 'true',\n 'deviceName': 'com.tecent.mm',\n 'appPackage': 'com.tencent.mm',\n 'appActivity': 
'.ui.LauncherUI',\n 'unicodeKeyboard': 'true',\n 'resetKeyboard': 'true',\n 'chromeOptions': {'w3c': False,\n 'androidProcess': 'com.tencent.mm:appbrand0'\n },\n 'udid': 'd59c99c6',\n # 'chromedriverExecutableDir': r'D:\pycharmproject\pythonProject\hogwartsAppium\chromedrivers'\n # 'chromedriverChromeMappingFile': 'D:\pycharmproject\pythonProject\hogwartsAppium\mapping.json',\n # fix the chromedriver bug and solve the @xweb_devtools_remote problem via a custom adb proxy\n # 'adbPort': 5038\n }\n self.driver = webdriver.Remote('http://localhost:4723/wd/hub', des_caps)\n self.driver.implicitly_wait(30)\n self.driver.find_element(By.XPATH, \"//*[@text='通讯录']\")\n # self.driver.implicitly_wait(10)\n\n\n def teardown(self):\n self.driver.quit()\n\n def find_top_window(self):\n # switch to each window first, then inspect its title\n for window in self.driver.window_handles:\n print(window)\n self.driver.switch_to.window(window)\n if \":VISIBLE\" in self.driver.title:\n print(self.driver.title)\n return True\n return False\n\n def test_search(self):\n size = self.driver.get_window_size()\n self.driver.swipe(size['width'] * 0.5, size['height'] * 0.4, size['width'] * 0.5, size['height'] * 0.8)\n sleep(5)\n self.driver.find_element(MobileBy.XPATH, \"//*[@text='搜索小程序']\").click()\n sleep(5)\n print(self.driver.contexts)\n # self.driver.switch_to.context(\"WEBVIEW_com.tencent.mm:toolsmp\")\n # self.find_top_window()\n WebDriverWait(self.driver, 20).until(\n expected_conditions.element_to_be_clickable((MobileBy.XPATH, \"//*[@text='取消']\")))\n self.driver.find_element(MobileBy.ID, 'com.tencent.mm:id/db_').send_keys(\"雪球\")\n sleep(5)", "sub_path": "lubo_practice/test_mini_program.py", "file_name": "test_mini_program.py", "file_ext": "py", "file_size_in_byte": 2715, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "appium.webdriver.Remote", "line_number": 37, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 37, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 59, "usage_type": "call"}, {"api_name": "appium.webdriver.common.mobileby.MobileBy.XPATH", "line_number": 60, "usage_type": "attribute"}, {"api_name": "appium.webdriver.common.mobileby.MobileBy", "line_number": 60, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 61, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 65, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 66, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 66, "usage_type": "name"}, {"api_name": "appium.webdriver.common.mobileby.MobileBy.XPATH", "line_number": 66, "usage_type": "attribute"}, {"api_name": "appium.webdriver.common.mobileby.MobileBy", "line_number": 66, "usage_type": "name"}, {"api_name": "appium.webdriver.common.mobileby.MobileBy.ID", "line_number": 67, "usage_type": "attribute"}, {"api_name": "appium.webdriver.common.mobileby.MobileBy", "line_number": 67, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "413842933", "text": "import netrc\nimport getpass\nimport numpy as np\nimport pandas as pd\nimport os\nimport requests\nfrom requests.auth import HTTPDigestAuth\nfrom urllib.parse import urlparse\nfrom urllib.error import HTTPError\nfrom urllib.request import urlopen\nfrom io import BytesIO\nfrom zipfile import ZipFile\nfrom glob import glob\nfrom netCDF4 import Dataset as NC\nfrom pathlib import Path\n\nfrom .helper import hist_start, hist_end, proj_start, proj_end, proj_time, secpera\n\nURS_URL = \"https://urs.earthdata.nasa.gov\"\n\n\ndef get_username():\n username = \"\"\n\n # For Python 2/3 compatibility:\n try:\n do_input = raw_input # noqa\n except NameError:\n do_input = input\n\n while not username:\n try:\n username = do_input(\"Earthdata username: \")\n except KeyboardInterrupt:\n quit()\n return username\n\n\ndef get_password():\n password = \"\"\n while not password:\n try:\n password = getpass.getpass(\"password: \")\n except KeyboardInterrupt:\n quit()\n return password\n\n\ndef get_credentials():\n \"\"\"Get user credentials from .netrc or prompt for input.\"\"\"\n credentials = None\n errprefix = \"\"\n try:\n info = netrc.netrc()\n username, account, password = info.authenticators(urlparse(URS_URL).hostname)\n errprefix = \"netrc error: \"\n except Exception as e:\n if not (\"No such file\" in str(e)):\n print(\"netrc error: {0}\".format(str(e)))\n username = None\n password = None\n\n if not username:\n username = get_username()\n password = get_password()\n\n return (username, password)\n\n\ndef load_imbie():\n \"\"\"\n Loading the IMBIE Greenland data set downloaded from\n http://imbie.org/wp-content/uploads/2012/11/imbie_dataset_greenland_dynamics-2020_02_28.xlsx\n\n \"\"\"\n imbie_df = pd.read_excel(\n \"http://imbie.org/wp-content/uploads/2012/11/imbie_dataset_greenland_dynamics-2020_02_28.xlsx\",\n sheet_name=\"Greenland Ice Mass\",\n engine=\"openpyxl\",\n )\n imbie = imbie_df[\n [\n \"Year\",\n \"Cumulative ice sheet mass change (Gt)\",\n \"Cumulative ice sheet mass change uncertainty (Gt)\",\n \"Cumulative surface mass balance anomaly (Gt)\",\n \"Cumulative surface mass balance anomaly uncertainty (Gt)\",\n \"Cumulative ice dynamics anomaly (Gt)\",\n \"Cumulative ice dynamics anomaly uncertainty (Gt)\",\n \"Rate of mass balance anomaly (Gt/yr)\",\n \"Rate of ice dynamics anomaly (Gt/yr)\",\n \"Rate of mass balance anomaly uncertainty (Gt/yr)\",\n \"Rate of ice dyanamics anomaly uncertainty (Gt/yr)\",\n ]\n ].rename(\n columns={\n \"Rate of mass balance anomaly (Gt/yr)\": \"Rate of surface mass balance anomaly (Gt/yr)\",\n \"Rate of mass balance anomaly uncertainty (Gt/yr)\": \"Rate of surface mass balance anomaly uncertainty (Gt/yr)\",\n \"Rate of ice dyanamics anomaly uncertainty (Gt/yr)\": \"Rate of ice dynamics anomaly uncertainty (Gt/yr)\",\n }\n )\n\n for v in [\n \"Cumulative ice sheet mass change (Gt)\",\n \"Cumulative ice dynamics anomaly (Gt)\",\n \"Cumulative surface mass balance anomaly (Gt)\",\n ]:\n imbie[v] -= imbie[imbie[\"Year\"] == proj_start][v].values\n\n s = imbie[(imbie[\"Year\"] >= 1980) & (imbie[\"Year\"] < 1990)]\n mass_mean = s[\"Cumulative ice sheet mass change (Gt)\"].mean() / (1990 - 1980)\n smb_mean = s[\"Cumulative surface mass balance anomaly (Gt)\"].mean() / (1990 - 1980)\n imbie[\"Rate of surface mass balance anomaly (Gt/yr)\"] += 2 * 1964 / 10\n imbie[\"Rate of ice dynamics anomaly (Gt/yr)\"] -= 2 * 1964 / 10\n\n return imbie\n\n\ndef load_mouginot():\n \"\"\"\n Load the Mouginot et al (2019) data set\n \"\"\"\n mou19_df = pd.read_excel(\n \"https://www.pnas.org/highwire/filestream/860129/field_highwire_adjunct_files/2/pnas.1904242116.sd02.xlsx\",\n sheet_name=\"(2) MB_GIS\",\n header=8,\n usecols=\"B,AR:BJ\",\n 
engine=\"openpyxl\",\n )\n mou19_d = mou19_df.iloc[7]\n mou19_smb = mou19_df.iloc[19]\n mou19_mass = mou19_df.iloc[41]\n mou19 = pd.DataFrame(\n data=np.hstack(\n [\n mou19_df.columns[1::].values.reshape(-1, 1),\n mou19_mass.values[1::].reshape(-1, 1),\n np.cumsum(mou19_smb.values[1::]).reshape(-1, 1),\n -np.cumsum(mou19_d.values[1::]).reshape(-1, 1),\n mou19_smb.values[1::].reshape(-1, 1),\n -mou19_d.values[1::].reshape(-1, 1),\n ]\n ),\n columns=[\n \"Year\",\n \"Cumulative ice sheet mass change (Gt)\",\n \"Cumulative surface mass balance anomaly (Gt)\",\n \"Cumulative ice dynamics anomaly (Gt)\",\n \"Rate of surface mass balance anomaly (Gt/yr)\",\n \"Rate of ice dynamics anomaly (Gt/yr)\",\n ],\n )\n mou19 = mou19.astype(\n {\n \"Year\": float,\n \"Cumulative ice sheet mass change (Gt)\": float,\n \"Cumulative surface mass balance anomaly (Gt)\": float,\n \"Cumulative ice dynamics anomaly (Gt)\": float,\n \"Rate of surface mass balance anomaly (Gt/yr)\": float,\n \"Rate of ice dynamics anomaly (Gt/yr)\": float,\n }\n )\n\n # Normalize\n for v in [\n \"Cumulative ice sheet mass change (Gt)\",\n \"Cumulative ice dynamics anomaly (Gt)\",\n \"Cumulative surface mass balance anomaly (Gt)\",\n ]:\n mou19[v] -= mou19[mou19[\"Year\"] == proj_start][v].values\n\n return mou19\n\n\ndef load_grace():\n\n grace = pd.read_csv(\n \"greenland_mass_200204_202101.txt\",\n header=30,\n delim_whitespace=True,\n skipinitialspace=True,\n names=[\"Year\", \"Cumulative ice sheet mass change (Gt)\", \"Cumulative ice sheet mass change uncertainty (Gt)\"],\n )\n # Normalize GRACE signal to the starting date of the projection\n grace[\"Cumulative ice sheet mass change (Gt)\"] -= np.interp(\n proj_start, grace[\"Year\"], grace[\"Cumulative ice sheet mass change (Gt)\"]\n )\n\n return grace\n\n\ndef load_ismip6():\n outpath = \".\"\n url = \"https://zenodo.org/record/3939037/files/v7_CMIP5_pub.zip\"\n\n with urlopen(url) as zipresp:\n with ZipFile(BytesIO(zipresp.read())) as zfile:\n zfile.extractall(outpath)\n\n ismip6_filename = \"ismip6_gris_ctrl_removed.csv.gz\"\n if os.path.isfile(ismip6_filename):\n df = pd.read_csv(ismip6_filename)\n else:\n print(f\"{ismip6_filename} not found locally. Downloading.\")\n ismip6_to_csv(\"v7_CMIP5_pub\", ismip6_filename)\n df = pd.read_csv(ismip6_filename)\n return df\n\n\ndef ismip6_to_csv(basedir, ismip6_filename):\n # Now read model output from each of the ISMIP6 files. 
The information we\n # need is in the file names, not the metadate so this is no fun.\n # Approach is to read each dataset into a dataframe, then concatenate all\n # dataframes into one Arch dataframe that contains all model runs.\n # Resulting dataframe consists of both historical and projected changes\n\n ctrl_files = []\n for path in Path(basedir).rglob(\"*_mm_*_ctrl_proj.nc\"):\n ctrl_files.append(path)\n\n hist_files = []\n for path in Path(basedir).rglob(\"*_mm_*_historical.nc\"):\n hist_files.append(path)\n\n files = []\n dfs = []\n for path in Path(basedir).rglob(\"*_mm_cr_*.nc\"):\n files.append(path)\n # Experiment\n nc = NC(path)\n exp_sle = nc.variables[\"sle\"][:]\n # For comparison with GRACE, we use grounded ice mass, converted to Gt\n exp_mass = nc.variables[\"limgr\"][:] / 1e12\n exp_smb = nc.variables[\"smb\"][:] / 1e12 * secpera\n\n f = path.name.split(f\"scalars_mm_cr_GIS_\")[-1].split(\".nc\")[0].split(\"_\")\n # This is ugly, because of \"ITLS_PIK\"\n if len(f) == 3:\n group, model, exp = f\n else:\n g1, g2, model, exp = f\n group = f\"{g1}_{g2}\"\n\n if exp in [\"exp07\"]:\n rcp = 26\n else:\n rcp = 85\n # Find the coressponding CTRL Historical simulations\n ctrl_file = [m for m in ctrl_files if (f\"{group}_{model}\" in m.name)][0]\n hist_file = [m for m in hist_files if (f\"{group}_{model}\" in m.name)][0]\n\n # The last entry of the historical and the first entry of the projection are the same\n\n # Projection\n nc_ctrl = NC(ctrl_file)\n ctrl_sle = nc_ctrl.variables[\"sle\"][:]\n ctrl_mass = nc_ctrl.variables[\"limgr\"][:] / 1e12\n ctrl_smb = nc_ctrl.variables[\"smb\"][:] / 1e12 * secpera\n\n # Historical\n nc_hist = NC(hist_file)\n hist_sle = nc_hist.variables[\"sle\"][:-1] - nc_hist.variables[\"sle\"][-1]\n hist_mass = (nc_hist.variables[\"limgr\"][:-1] - nc_hist.variables[\"limgr\"][-1]) / 1e12\n hist_smb = nc_hist.variables[\"smb\"][:-1] / 1e12 * secpera\n\n # Per email with Heiko on Nov. 13, 2020, stick with just the exp projections alone, without adding back the ctrl projections\n \"\"\"\n from Heiko:\n \"The solution that we chose for ISMIP6 is therefore to remove the ctrl_proj from the projections\n and communicate the numbers as such, i.e. SL contribution for additional forcing after 2014. 
\n In our (strong) opinion, the results should never be communicated uncorrected.\"\n \n Also, point of reference from Goelzer et al., 2020, the ctrl simulations represent mass change\n with the SMB fixed to 1960-1989 levels (no anomaly in SMB) and no change in ice sheet mask.\n So ctrl after the historical spinup represents an abrupt return to an earlier SMB forcing in 2015.\n \"\"\"\n\n proj_sle = exp_sle\n proj_mass = exp_mass\n proj_smb = exp_smb\n\n # Historical simulations start at different years since initialization was left\n # up to the modelers\n hist_time = -np.arange(len(hist_sle))[::-1] + hist_end\n\n # Let's add the data to the main DataFrame\n m_time = np.hstack((hist_time, proj_time))\n m_sle = -np.hstack((hist_sle, proj_sle)) * 100\n m_sle -= np.interp(proj_start, m_time, m_sle)\n m_mass = np.hstack((hist_mass, proj_mass))\n m_smb = np.cumsum(np.hstack((hist_smb, proj_smb)))\n m_smb -= np.interp(proj_start, m_time, m_smb)\n m_d = m_mass - m_smb\n m_mass_rate = np.gradient(np.hstack((hist_mass, proj_mass)))\n m_smb_rate = np.hstack((hist_smb, proj_smb))\n m_d_rate = m_mass_rate - m_smb_rate\n m_mass -= np.interp(proj_start, m_time, m_mass)\n\n n = len(m_time)\n dfs.append(\n pd.DataFrame(\n data=np.hstack(\n [\n m_time.reshape(-1, 1),\n m_sle.reshape(-1, 1),\n m_mass.reshape(-1, 1),\n m_smb.reshape(-1, 1),\n m_d.reshape(-1, 1),\n m_mass_rate.reshape(-1, 1),\n m_smb_rate.reshape(-1, 1),\n m_d_rate.reshape(-1, 1),\n np.repeat(group, n).reshape(-1, 1),\n np.repeat(model, n).reshape(-1, 1),\n np.repeat(exp, n).reshape(-1, 1),\n np.repeat(rcp, n).reshape(-1, 1),\n ]\n ),\n columns=[\n \"Year\",\n \"SLE (cm)\",\n \"Cumulative ice sheet mass change (Gt)\",\n \"Cumulative surface mass balance anomaly (Gt)\",\n \"Cumulative ice dynamics anomaly (Gt)\",\n \"Rate of ice sheet mass change (Gt/yr)\",\n \"Rate of surface mass balance anomaly (Gt/yr)\",\n \"Rate of ice dynamics anomaly (Gt/yr)\",\n \"Group\",\n \"Model\",\n \"Exp\",\n \"RCP\",\n ],\n )\n )\n # End of working with each model run individually (the path for-loop)\n\n # Concatenate all DataFrames and convert object types\n df = pd.concat(dfs)\n df = df.astype(\n {\n \"Year\": float,\n \"SLE (cm)\": float,\n \"Cumulative ice sheet mass change (Gt)\": float,\n \"Cumulative surface mass balance anomaly (Gt)\": float,\n \"Cumulative ice dynamics anomaly (Gt)\": float,\n \"Rate of ice sheet mass change (Gt/yr)\": float,\n \"Rate of surface mass balance anomaly (Gt/yr)\": float,\n \"Rate of ice dynamics anomaly (Gt/yr)\": float,\n \"Model\": str,\n \"Exp\": str,\n \"RCP\": str,\n }\n )\n df.to_csv(ismip6_filename, compression=\"gzip\")\n", "sub_path": "utilities/data_loader.py", "file_name": "data_loader.py", "file_ext": "py", "file_size_in_byte": 12757, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "getpass.getpass", "line_number": 43, "usage_type": "call"}, {"api_name": "netrc.netrc", "line_number": 54, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 76, "usage_type": "call"}, {"api_name": "helper.proj_start", "line_number": 108, "usage_type": "name"}, {"api_name": "pandas.read_excel", "line_number": 123, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 138, "usage_type": "call"}, 
{"api_name": "numpy.cumsum", "line_number": 139, "usage_type": "call"}, {"api_name": "helper.proj_start", "line_number": 170, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 185, "usage_type": "call"}, {"api_name": "helper.proj_start", "line_number": 186, "usage_type": "argument"}, {"api_name": "urllib.request.urlopen", "line_number": 196, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 197, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path", "line_number": 201, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 202, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 206, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 218, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 222, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 227, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 230, "usage_type": "call"}, {"api_name": "helper.secpera", "line_number": 234, "usage_type": "name"}, {"api_name": "netCDF4.Dataset", "line_number": 255, "usage_type": "call"}, {"api_name": "helper.secpera", "line_number": 258, "usage_type": "name"}, {"api_name": "netCDF4.Dataset", "line_number": 261, "usage_type": "call"}, {"api_name": "helper.secpera", "line_number": 264, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 284, "usage_type": "call"}, {"api_name": "helper.hist_end", "line_number": 284, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 287, "usage_type": "call"}, {"api_name": "helper.proj_time", "line_number": 287, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 289, "usage_type": "call"}, {"api_name": "helper.proj_start", "line_number": 289, "usage_type": "argument"}, {"api_name": "numpy.hstack", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 292, "usage_type": "call"}, {"api_name": "helper.proj_start", "line_number": 292, "usage_type": "argument"}, {"api_name": "numpy.gradient", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 297, "usage_type": "call"}, {"api_name": "helper.proj_start", "line_number": 297, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 315, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 337, "usage_type": "call"}]} +{"seq_id": "163069675", "text": "import socket\nimport struct\nimport argparse\nimport logging\nimport sys\nimport random\n\ndef get_args(argv=None):\n '''read the arguments from command line and return the values'''\n 
parser = argparse.ArgumentParser(description=\"LIGHTCLIENT\")\n    parser.add_argument('-s', type=str, required=True, help='Load Balancer IP')\n    parser.add_argument('-p', type=int, required=True, help='Port')\n    parser.add_argument('-l', type=str, required=True, help='logFile')\n    parser.add_argument('-f', type=str, required=True, help='File to write to')\n    args = parser.parse_args()\n    loadBalancer = args.s\n    server_port = args.p\n    log_file = args.l \n    dest_file = args.f\n    return loadBalancer, server_port, log_file, dest_file\n\n\ndef get_bit(num, i):\n    return int((num & (1 << i)) != 0)\n\ndef update_bit(num, i, bit):\n    # clear bit i of num, then set it to the requested bit value\n    mask = ~(1 << i)\n    return (num & mask) | (bit << i)\n\ndef send_packet(sock, server_address, seq_num, ack_num, flag_seq):\n    '''send packet'''\n    flag_num = 0\n    ack_flag = int(flag_seq[0])\n    syn_flag = int(flag_seq[1])\n    fin_flag = int(flag_seq[2])\n    if ack_flag == 1:\n        flag_num = update_bit(flag_num, 2, 1)\n    if syn_flag == 1:\n        flag_num = update_bit(flag_num, 1, 1)\n    if fin_flag == 1:\n        flag_num = update_bit(flag_num, 0, 1)\n\n    data = struct.pack('>III', seq_num, ack_num, flag_num) \n    sock.sendto(data, server_address)\n    logging.info(f\"Sending: Seq num {seq_num} ACK num {ack_num}, ACK flag {ack_flag} SYN flag {syn_flag} FIN flag {fin_flag}\")\n\n\ndef recv_data(sock, size, handshake):\n    '''receive data'''\n    if handshake:\n        r_data = sock.recvfrom(12)\n        data = r_data[0]\n        conn = r_data[1]\n        payload = \"\"\n        seq, ack, flags = struct.unpack('>III', data)\n        ack_flag = get_bit(flags,2)\n        syn_flag = get_bit(flags,1)\n        fin_flag = get_bit(flags,0)\n        logging.info(f\"Seq num {seq} ACK num {ack}, ACK flag {ack_flag} SYN flag {syn_flag} FIN flag {fin_flag}\")\n    else:\n        r_data = sock.recvfrom(524)\n        data = r_data[0]\n        conn = r_data[1]\n        new_formatter = '>III'+str(len(data)-12)+'s'\n        seq, ack, flags, payload = struct.unpack(new_formatter, data)\n        ack_flag = get_bit(flags,2)\n        syn_flag = get_bit(flags,1)\n        fin_flag = get_bit(flags,0)\n        logging.info(f\"Seq num {seq} ACK num {ack}, ACK flag {ack_flag} SYN flag {syn_flag} FIN flag {fin_flag}, Payload size: {len(data)}\")\n    print(\"Received Message. 
Payload Size: {}\".format(len(payload)))\n return seq, ack, ack_flag, syn_flag, fin_flag, payload\n\nif __name__ == '__main__':\n #parse arguments\n server_ip, server_port, log_location, file_location = get_args(sys.argv[1:])\n print(\"Server IP: {}, Port: {}, Log location: {}, File Location: {} \".format(server_ip, server_port, log_location, file_location))\n\n #initialize the file\n\n f = open(file_location, 'w')\n f.close()\n\n #configure logging\n logging.basicConfig(filename=log_location, filemode='w', format='%(asctime)s - %(levelname)s - %(funcName)s - %(message)s', level=logging.INFO)\n logging.info(f'Starting LIGHTCLIENT')\n logging.info(f\"Remote Server IP = {server_ip}, Remote Server Port = {server_port}, Logfile = {log_location}\")\n\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n server_addr = (server_ip, server_port)\n \n #send initial sequence number\n client_seq = random.randint(0,4294967295) #0 to 2^32 -1, as in TCP\n print(\"Client Sequence Number:\", client_seq)\n server_seq = 0\n\n #send first handshake packet\n #syn + ISN\n send_packet(client_socket, server_addr, client_seq, server_seq, '010')\n #receive SYN+ACK, SET REMOTE\n #seq_num, ack_num, flags = recv_data(client_socket, 12)\n #we are receiving handshake packets\n server_seq_num, server_ack_num, ack_flag, syn_flag, fin_flag, payload = recv_data(client_socket, 12, True)\n\n #syn + ACK\n\n send_packet(client_socket, server_addr, server_seq_num+1, server_ack_num, '100')\n \n #handshake done\n\n #get data\n while True:\n with open(file_location, 'a') as f:\n# data, addr = client_socket.recvfrom(524)\n seq_num, ack_num, ack_flag, syn_flag, fin_flag, payload = recv_data(client_socket, 524, False)\n #for normal communication, all data is always sent with 010 flag\n f.write(payload.decode())\n if fin_flag:\n logging.info(f'Received FIN - done receiving data')\n print('Received FIN - done receiving data')\n sys.exit(0)\n #otherwise continue with next expected sequence\n send_packet(client_socket, server_addr, ack_num, seq_num+len(payload)+1-12, '100')\n\n\n\n\n", "sub_path": "replicaclient.py", "file_name": "replicaclient.py", "file_ext": "py", "file_size_in_byte": 4736, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 44, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 46, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 56, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 60, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 70, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 76, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 85, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 86, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 87, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 89, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 89, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 89, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 93, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 119, "usage_type": 
"call"}, {"api_name": "sys.exit", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "55266244", "text": "\r\n# libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom string import digits\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.decomposition import PCA\r\nimport matplotlib.pyplot as plt\r\nfrom gensim.models import Word2Vec\r\n\r\n\r\n# reading the data\r\ndef reading():\r\n flist = [] # list of words\r\n flist_first_element=[]\r\n whip = eval(open('1000_obj_list.txt', 'r').read())\r\n flist = [sent.split(\", \") for sent in list(whip.values())]\r\n flist_first_element= [[sent.split(\", \")[0]] for sent in list(whip.values())]\r\n flist_first_element_string= [sent.split(\", \")[0] for sent in list(whip.values())]\r\n a=[]\r\n with open('categories_places365.txt') as f:\r\n remove_digits = str.maketrans('', '', digits)\r\n for line in f:\r\n inner_list = [elt.strip() for elt in line.split(' ')]\r\n inner_list = [i.translate(remove_digits) for i in inner_list][0][3:]\r\n flist.append(inner_list.split(\"/\"))\r\n # a.append(inner_list.split(\"/\"))\r\n flist_first_element_string.append(inner_list.split(\"/\")[0])\r\n flist_first_element.append(inner_list.split(\"/\"))\r\n # a.append(inner_list.split(\"/\"))\r\n return flist, flist_first_element,flist_first_element_string\r\n\r\n# calling the reading function and save the list of features(words) in featurs_list\r\nfeaturs_list,featurs_list1,flist_first_element_string= reading()\r\nprint(featurs_list)\r\n\r\n#making the word2vec model\r\nmodel = Word2Vec(featurs_list1, min_count=1)\r\nwords = list(model.wv.vocab)\r\nword2vec_model = model[model.wv.vocab]\r\n\r\n\r\n# transform the n-dimentional model to two-dimentional model\r\npca = PCA(n_components=2)\r\nresult = pca.fit_transform(word2vec_model)\r\n\r\n\r\n# clustering with k-means \r\nk = 15 # specify the number of clusters\r\nkmeans = KMeans(n_clusters=k)\r\ny_pred = kmeans.fit_predict(result)\r\ndf = pd.DataFrame(y_pred, columns=[\"cluster\"])\r\ndf[\"c1\"] = result[:, 0]\r\ndf[\"c2\"] = result[:, 1]\r\ndf[\"c3\"] = words\r\n\r\ndf_copy = pd.DataFrame()\r\ndf_copy=df.copy()\r\n\r\nword_found=False\r\nrow_num=[]\r\nfor word in df_copy['c3']:\r\n word_found = False\r\n for word1 in flist_first_element_string:\r\n # print(int(df_copy[df_copy['c3'] == word1].index[0]))\r\n # print(df_copy[df_copy['c3'] == word1].index.item())\r\n\r\n if word==word1:\r\n print(word)\r\n word_found=True\r\n break\r\n if not word_found:\r\n print('not fond')\r\n word_found=False\r\n print(word)\r\n print(df_copy[df_copy['c3'] == word].index.item())\r\n row_num.append(df_copy[df_copy['c3'] == word].index.item())\r\n # df_copy.drop('1')\r\ndf_copy.drop(df_copy.index[row_num],inplace=True)\r\n# df_copy.drop(df_copy.index[436],inplace=True)\r\n# df_copy.drop(df_copy.index[438],inplace=True)\r\n# df_copy.drop(df_copy.index[455],inplace=True)\r\n# df_copy.drop(df_copy.index[477],inplace=True)\r\n# df_copy.drop(df_copy.index[495],inplace=True)\r\n\r\ndf_copy = df_copy.reset_index(drop=True)\r\n\r\n# compare feature list with model words\r\n# flist_first_element_string#list of feature\r\n# words # we have it from model\r\nword_found=False\r\nrow_num=[]\r\nfor word_index in range(flist_first_element_string.__len__()):\r\n word_found = False\r\n for word1 in df_copy['c3']:\r\n # print(int(df_copy[df_copy['c3'] == word1].index[0]))\r\n # print(df_copy[df_copy['c3'] == word1].index.item())\r\n\r\n if flist_first_element_string[word_index]==word1:\r\n print(word1)\r\n print(word_index)\r\n 
word_found=True\r\n            break\r\n    if not word_found:\r\n        print('not found')\r\n        word_found=False\r\n        print(word1)\r\n        # print(df_copy[df_copy['c3'] == word].index.item())\r\n        # row_num.append(df_copy[df_copy['c3'] == word].index.item())\r\n        row_num.append(word_index)  # record the index of the feature missing from the model\r\nprint()\r\n    # df.drop(df.index[[1, 3]], inplace=True)\r\n\r\n# visualize the clusters of given data\r\n# list of selected colors to each cluster\r\n# colors = [\"black\", \"red\", \"orangered\", \"olive\", \"g\", \"teal\", \"rosybrown\", \"purple\", \"c\", \"crimson\",\r\n#           \"sienna\", \"gold\", \"tan\", \"magenta\", \"deepskyblue\", \"darkgrey\"]\r\n#\r\n# for index, row in df.iterrows():\r\n#     plt.scatter(row[\"c1\"], row[\"c2\"], color=colors[int(row[\"cluster\"])])\r\n#     # plt.annotate(row[\"c3\"], xy=(row[\"c1\"], row[\"c2\"]))\r\n#\r\n# plt.show()\r\ndifference_list=list(set(flist_first_element_string) - set(df_copy['c3']))\r\nprint('difference',list(set(flist_first_element_string) - set(df_copy['c3'].tolist())))\r\n# print(Diff(flist_first_element_string, word1))\r\n\r\n\r\n\r\nmy_set=list(set([x for x in flist_first_element_string if flist_first_element_string.count(x) > 1]))\r\nDict_dup={}\r\nfor x in flist_first_element_string:\r\n    if flist_first_element_string.count(x) > 1:\r\n        Dict_dup[x]=flist_first_element_string.count(x)\r\ndict_index=dict()\r\nlist_drop_clos=[]\r\nfor key in Dict_dup.keys():\r\n    # index_0=flist_first_element_string.index(key)\r\n    index_0=[i for i,val in enumerate(flist_first_element_string) if val==key]\r\n    if index_0.__len__()==3:\r\n        dict_index[key]=index_0[1:]\r\n        list_drop_clos.extend(index_0[1:])\r\n    else:\r\n        dict_index[key] = index_0[-1]\r\n        list_drop_clos.extend(index_0[1:])\r\n\r\n# dict_index\r\n# read regular clustering data\r\n# path='/home/hamid/PycharmProjects/research/code for project/K-mean semantically on features/cluster-semantic-vectors-master/clustering-symantic-relationship/regular clustring result for n=4/semantic clustering for fave object'\r\n# df = pd.read_csv(path+\"/fave_obj_predicted_ave.csv\",header=None)\r\ndf = pd.read_csv(\"fave_obj_predicted_ave.csv\",header=None)\r\n# df0 = pd.read_csv(path+\"/fave_obj_predicted_ave0.csv\",header=None)\r\ndf0 = pd.read_csv(\"fave_obj_predicted_ave0.csv\",header=None)\r\n# df1 = pd.read_csv(path+\"/fave_obj_predicted_ave1.csv\",header=None)\r\ndf1 = pd.read_csv(\"fave_obj_predicted_ave1.csv\",header=None)\r\n# df2 = pd.read_csv(path+\"/fave_obj_predicted_ave2.csv\",header=None)\r\ndf2 = pd.read_csv(\"fave_obj_predicted_ave2.csv\",header=None)\r\n# df3 = pd.read_csv(path+\"/fave_obj_predicted_ave3.csv\",header=None)\r\ndf3 = pd.read_csv(\"fave_obj_predicted_ave3.csv\",header=None)\r\n\r\n\r\ndf_cluster = pd.DataFrame()\r\ndf_cluster0 = pd.DataFrame()\r\ndf_cluster1 = pd.DataFrame()\r\ndf_cluster2 = pd.DataFrame()\r\ndf_cluster3 = pd.DataFrame()\r\n#drop regular clustering result col\r\ndf.drop(df.columns[-1],axis=1,inplace=True)\r\ndf0.drop(df0.columns[-1],axis=1,inplace=True)\r\ndf1.drop(df1.columns[-1],axis=1,inplace=True)\r\ndf2.drop(df2.columns[-1],axis=1,inplace=True)\r\ndf3.drop(df3.columns[-1],axis=1,inplace=True)\r\n\r\n# drop duplicated cols\r\n# df.drop(df.columns[list_drop_clos],axis=1,inplace=True)# save the results to 
file\r\ndf.drop(df.columns[list_drop_clos],axis=1,inplace=True)\r\ndf0.drop(df0.columns[list_drop_clos],axis=1,inplace=True)\r\ndf1.drop(df1.columns[list_drop_clos],axis=1,inplace=True)\r\ndf2.drop(df2.columns[list_drop_clos],axis=1,inplace=True)\r\ndf3.drop(df3.columns[list_drop_clos],axis=1,inplace=True)\r\n\r\n\r\nall_col_set=set(i for i in range(1365))\r\ndrop_col_set=set(list_drop_clos)\r\nmy_col=list(all_col_set-drop_col_set)\r\ntemp=[]\r\ntemp_dict={}\r\ndict_group_index={}\r\nfor j in range(15):\r\n temp=[]\r\n for i in list(df_copy[df_copy.iloc[:,0]==j].index.values.astype(int)):\r\n temp.append(my_col[i])\r\n temp_dict[j]=temp\r\n\r\n\r\n# code is true till here just check after this point.\r\n#mean over symantic cols clusters\r\nfor key in temp_dict.keys():\r\n df[str(key)+'mean'] = df[ temp_dict[key]].mean(axis=1)\r\n df0[str(key)+'mean'] = df0[ temp_dict[key]].mean(axis=1)\r\n df1[str(key)+'mean'] = df1[ temp_dict[key]].mean(axis=1)\r\n df2[str(key)+'mean'] = df2[ temp_dict[key]].mean(axis=1)\r\n df3[str(key)+'mean'] = df3[ temp_dict[key]].mean(axis=1)\r\n\r\n df_cluster[str(key) + 'mean'] = df[str(key) + 'mean']\r\n df_cluster0[str(key) + 'mean'] = df0[str(key) + 'mean']\r\n df_cluster1[str(key) + 'mean'] = df1[str(key) + 'mean']\r\n df_cluster2[str(key) + 'mean'] = df2[str(key) + 'mean']\r\n df_cluster3[str(key) + 'mean'] = df3[str(key) + 'mean']\r\n\r\n\r\ndf_cluster_mean = pd.DataFrame()\r\ndf_cluster0_mean = pd.DataFrame()\r\ndf_cluster1_mean= pd.DataFrame()\r\ndf_cluster2_mean= pd.DataFrame()\r\ndf_cluster3_mean = pd.DataFrame()\r\nave_df_cluster=[]\r\nave_df_cluster0=[]\r\nave_df_cluster1=[]\r\nave_df_cluster2=[]\r\nave_df_cluster3=[]\r\nfor key in range(len(df_cluster.columns)):\r\n ave_df_cluster.append(df_cluster[str(key)+'mean'].mean())\r\n ave_df_cluster0.append(df_cluster0[str(key)+'mean'].mean())\r\n ave_df_cluster1.append(df_cluster1[str(key)+'mean'].mean())\r\n ave_df_cluster2.append(df_cluster2[str(key)+'mean'].mean())\r\n ave_df_cluster3.append(df_cluster3[str(key)+'mean'].mean())\r\n\r\n# path1='/home/hamid/PycharmProjects/research/code for project/result1/semantic'\r\n# path1='semantic/faveCnnObj'\r\npath1='D:/rojiyar/my paper/result__/semantic clustring/fave_obj'\r\ndf_ave_df_cluster = pd.DataFrame(ave_df_cluster)\r\n# df_ave_df_cluster.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_ave_df_cluster.csv\",header=False,index=False)\r\ndf_ave_df_cluster.to_csv(path1+\"/faveCnnObj_ave_df_cluster.csv\",header=False,index=False)\r\n\r\ndf_ave_df_cluster0 = pd.DataFrame(ave_df_cluster0)\r\n# df_ave_df_cluster0.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_ave_df_cluster0.csv\",header=False,index=False)\r\ndf_ave_df_cluster0.to_csv(path1+\"/faveCnnObj_ave_df_cluster0.csv\",header=False,index=False)\r\n\r\ndf_ave_df_cluster1 = pd.DataFrame(ave_df_cluster1)\r\n# df_ave_df_cluster1.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_ave_df_cluster1.csv\",header=False,index=False)\r\ndf_ave_df_cluster1.to_csv(path1+\"/faveCnnObj_ave_df_cluster1.csv\",header=False,index=False)\r\n\r\ndf_ave_df_cluster2 = pd.DataFrame(ave_df_cluster2)\r\n# df_ave_df_cluster2.to_csv(r\"/home/hamid/PycharmProjects/research/code for 
project/result1/semantic/faveCnnObj/faveCnnObj_ave_df_cluster2.csv\",header=False,index=False)\r\ndf_ave_df_cluster2.to_csv(path1+\"/faveCnnObj_ave_df_cluster2.csv\",header=False,index=False)\r\n\r\ndf_ave_df_cluster3 = pd.DataFrame(ave_df_cluster3)\r\n# df_ave_df_cluster3.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_ave_df_cluster3.csv\",header=False,index=False)\r\ndf_ave_df_cluster3.to_csv(path1+\"/faveCnnObj_ave_df_cluster3.csv\",header=False,index=False)\r\n\r\n# df.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_predicted_symantic.csv\", header=False, index=False)\r\ndf.to_csv(path1+\"/faveCnnObj_predicted_symantic.csv\", header=False, index=False)\r\n# df0.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_predicted_symantic0.csv\", header=False, index=False)\r\ndf0.to_csv(path1+\"/faveCnnObj_predicted_symantic0.csv\", header=False, index=False)\r\n# df1.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_predicted_symantic1.csv\", header=False, index=False)\r\ndf1.to_csv(path1+\"/faveCnnObj_predicted_symantic1.csv\", header=False, index=False)\r\n# df2.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_predicted_symantic2.csv\", header=False, index=False)\r\ndf2.to_csv(path1+\"/faveCnnObj_predicted_symantic2.csv\", header=False, index=False)\r\n# df3.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_predicted_symantic3.csv\", header=False, index=False)\r\ndf3.to_csv(path1+\"/faveCnnObj_predicted_symantic3.csv\", header=False, index=False)\r\n\r\n# df_cluster.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_predicted_symantic_final.csv\", header=True, index=False)\r\ndf_cluster.to_csv(path1+\"/faveCnnObj_predicted_symantic_final.csv\", header=True, index=False)\r\n# df_cluster0.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_predicted_symantic0_final.csv\", header=False, index=False)\r\ndf_cluster0.to_csv(path1+\"/faveCnnObj_predicted_symantic0_final.csv\", header=False, index=False)\r\n# df_cluster1.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_predicted_symantic1_final.csv\", header=False, index=False)\r\ndf_cluster1.to_csv(path1+\"/faveCnnObj_predicted_symantic1_final.csv\", header=False, index=False)\r\n# df_cluster2.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_predicted_symantic2_final.csv\", header=False, index=False)\r\ndf_cluster2.to_csv(path1+\"/faveCnnObj_predicted_symantic2_final.csv\", header=False, index=False)\r\n# df_cluster3.to_csv(r\"/home/hamid/PycharmProjects/research/code for project/result1/semantic/faveCnnObj/faveCnnObj_predicted_symantic3_final.csv\", header=False, index=False)\r\ndf_cluster3.to_csv(path1+\"/faveCnnObj_predicted_symantic3_final.csv\", header=False, index=False)\r\n\r\n\r\n# df_copy.to_csv(\"results1.csv\")\r\nprint('bye')\r\n", "sub_path": "semantic clustering for fave object/my_cluster.py", "file_name": "my_cluster.py", "file_ext": "py", "file_size_in_byte": 13062, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "string.digits", 
"line_number": 22, "usage_type": "argument"}, {"api_name": "gensim.models.Word2Vec", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 151, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 153, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 155, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 157, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 159, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 162, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 163, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 164, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 165, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 166, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 212, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 213, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 214, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 215, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 216, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 232, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 236, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 240, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 244, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "95782750", "text": "\"\"\"Define an app for working with TTS (over Sonos).\"\"\"\nfrom typing import Tuple\n\nfrom core import Base\nfrom util.dt import relative_time_of_day\n\nOPENER_FILE_URL = 'https://hass.myserver.com/local/tts_opener.mp3'\n\n\nclass TTS(Base):\n \"\"\"Define a class to represent the app.\"\"\"\n\n def configure(self) -> None:\n \"\"\"Configure.\"\"\"\n self._last_spoken_text = \"\"\n self.register_endpoint(self._emergency_endpoint, \"emergency\")\n self.register_endpoint(self._tts_endpoint, \"tts\")\n\n @staticmethod\n def _calculate_iterated_text(text: str, iterations: int = 1) -> str:\n \"\"\"Return a string that equals itself times a number of iterations.\"\"\"\n return \" Once again, \".join([text] * iterations)\n\n def _emergency_endpoint(self, data: dict) -> Tuple[dict, int]:\n \"\"\"Define an endpoint to alert us of an emergency.\"\"\"\n if self.presence_manager.noone(self.presence_manager.HomeStates.home):\n return {\"status\": \"ok\", \"message\": \"No one home; ignoring\"}, 200\n\n try:\n name = data[\"name\"].title()\n except KeyError:\n return ({\"status\": \"error\", \"message\": 'Missing \"name\" parameter'}, 502)\n\n self.log(\"Emergency Notification from %s\", name)\n\n statement = f\"Please call {name} as soon as possible.\"\n self.speak(statement, iterations=3)\n return {\"status\": \"ok\", \"message\": statement}, 200\n\n def _on_end(self, kwargs: dict) -> None:\n \"\"\"Restore the Sonos to its previous state after speech is done.\"\"\"\n master_sonos_player = kwargs[\"master_sonos_player\"]\n\n 
master_sonos_player.play_file(OPENER_FILE_URL)\n self.run_in(self._on_restore, 3.25)\n\n def _on_pre_end(self, kwargs: dict) -> None:\n \"\"\"Calculate how long the TTS should play.\"\"\"\n master_sonos_player = kwargs[\"master_sonos_player\"]\n\n duration = self.get_state(str(master_sonos_player), attribute=\"media_duration\")\n if not duration:\n self.error(\"Couldn't calculate ending duration for TTS\")\n return\n\n self.run_in(self._on_end, duration, master_sonos_player=master_sonos_player)\n\n def _on_restore(self, kwargs: dict) -> None:\n \"\"\"Restore the Sonos to its previous state after speech is done.\"\"\"\n if self.living_room_tv.current_activity_id:\n self.living_room_tv.play()\n self.sonos_manager.ungroup_all()\n self.sonos_manager.restore_all()\n\n def _on_speak(self, kwargs: dict) -> None:\n \"\"\"Restore the Sonos to its previous state after speech is done.\"\"\"\n master_sonos_player = kwargs[\"master_sonos_player\"]\n text = kwargs[\"text\"]\n\n self.call_service(\n \"tts/amazon_polly_say\", entity_id=str(master_sonos_player), message=text\n )\n\n self.run_in(self._on_pre_end, 2, master_sonos_player=master_sonos_player)\n\n def _tts_endpoint(self, data: dict) -> Tuple[dict, int]:\n \"\"\"Define an API endpoint to handle incoming TTS requests.\"\"\"\n if self.presence_manager.noone(self.presence_manager.HomeStates.home):\n return {\"status\": \"ok\", \"message\": \"No one home; ignoring\"}, 200\n\n try:\n text = data[\"text\"]\n except KeyError:\n return ({\"status\": \"error\", \"message\": 'Missing \"text\" parameter'}, 502)\n\n self.log(\"Received TTS data: %s\", data)\n\n self.speak(text, iterations=data.get(\"iterations\", 1))\n return {\"status\": \"ok\", \"message\": data[\"text\"]}, 200\n\n def repeat(self, iterations: int = 1) -> None:\n \"\"\"Repeat the last thing that was spoken.\"\"\"\n if self._last_spoken_text:\n final_string = self._calculate_iterated_text(\n self._last_spoken_text, iterations\n )\n\n self.log(\"Repeating over TTS: %s\", final_string)\n self.speak(final_string)\n\n def speak(self, text: str, iterations: int = 1) -> None:\n \"\"\"Speak the provided text through the Sonos, pausing as needed.\"\"\"\n final_string = self._calculate_iterated_text(text, iterations)\n\n self.sonos_manager.snapshot_all()\n self.sonos_manager.set_all_volume()\n master_sonos_player = self.sonos_manager.group()\n master_sonos_player.play_file(OPENER_FILE_URL)\n\n if self.living_room_tv.current_activity_id:\n self.living_room_tv.pause()\n\n self.log(\"Speaking over TTS: %s\", final_string)\n\n self.run_in(\n self._on_speak,\n 3.25,\n master_sonos_player=master_sonos_player,\n text=f\"Good {relative_time_of_day(self)}. {final_string}\",\n )\n\n self._last_spoken_text = text\n", "sub_path": "appdaemon/settings/apps/tts.py", "file_name": "tts.py", "file_ext": "py", "file_size_in_byte": 4637, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "core.Base", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 76, "usage_type": "name"}, {"api_name": "util.dt.relative_time_of_day", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "26315896", "text": "from django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.db import models as m\nfrom . 
import models\nfrom django.db.models import Manager\nfrom django.utils.html import format_html\nfrom django import forms\nfrom django.contrib.postgres.fields import ArrayField\n\n\nclass UserManager(Manager):\n    pass\n\n\nclass UserProxy(User):\n    class Meta:\n        proxy = True\n\n    objects = UserManager()\n\n\nclass HistoryProxy(models.SearchedItems):\n    \"\"\"overrides generating HTML tag for img_url\"\"\"\n    class Meta:\n        proxy = True\n\n    def img_tag(self):\n        return format_html(\"<a href='{}'><img src='{}'/></a>\",\n                           self.url, self.img_url)\n\n\nclass InlineEbaySettings(admin.StackedInline):\n    model = models.EbayUserSettings\n    formfield_overrides = {\n        ArrayField: {\"widget\": forms.Textarea}\n    }\n\n\nclass InlineAukroSettings(admin.StackedInline):\n    model = models.AukroUserSettings\n    formfield_overrides = {\n        ArrayField: {\"widget\": forms.Textarea}\n    }\n\nclass InlineHistory(admin.TabularInline):\n    model = HistoryProxy\n    fields = (\"img_tag\", \"expires\", \"bid_price\", \"bid_currency\",\n              \"buy_price\", \"buy_currency\")\n    readonly_fields = fields\n    empty_value_display = '--'\n\n\nclass InlineProfile(admin.StackedInline):\n    model = models.Profile\n    #list_display = (\"username\", \"email_rcvr\", \"aukro_module\", \"ebay_module\")\n    fields = (\"email_rcvr\", \"aukro_module\", \"ebay_module\")\n    #list_display += (\"title\", \"item_id\")\n\n@admin.register(UserProxy)\nclass UserProfile(admin.ModelAdmin):\n    inlines = [InlineProfile, InlineAukroSettings, InlineEbaySettings,\n               InlineHistory]\n    fields = (\"email\", )\n    readonly_fields = (\"email\", )", "sub_path": "auctweb/apps/aserver/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 1736, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.db.models.Manager", "line_number": 11, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 15, "usage_type": "name"}, {"api_name": "django.utils.html.format_html", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 32, "usage_type": "name"}, {"api_name": "django.contrib.postgres.fields.ArrayField", "line_number": 35, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 35, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 39, "usage_type": "name"}, {"api_name": "django.contrib.postgres.fields.ArrayField", "line_number": 42, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 42, "usage_type": "name"}, {"api_name": "django.contrib.admin.TabularInline", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 45, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 53, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 60, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 59, "usage_type": "call"}, {"api_name": "django.contrib.admin", 
"line_number": 59, "usage_type": "name"}]} +{"seq_id": "192165529", "text": "\"\"\"\nClass for reading and storing KG1 signals.\n\nInherits from SignalBase (signal_base.py).\n\nAdditional functionality for correcting fringe\njumps and storing status flags\n\"\"\"\n\n# import copy\nimport logging\n\nimport numpy as np\n\n# from make_plots import make_plot\nfrom models.signal_base import SignalBase\n\n# from kg4_data import Kg4Data\nfrom utilities.library import *\n\nlogger = logging.getLogger(__name__)\n\n# ----------------------------\n__author__ = \"B. Viola\"\n# ----------------------------\n\n\nclass SignalKg1(SignalBase):\n\n # ------------------------\n def __init__(self, constants, shot_no):\n \"\"\"\n Init function\n\n :param constants: Instance of Kg1Consts, containing useful constants & all node names for PPF & JPFs\n\n \"\"\"\n self.signal_type = \"\" # kg1r, kg1c, kg1v\n self.dcn_or_met = \"\" # dcn, met\n self.corrections = SignalBase(\n constants\n ) # Corrections that have been made\n self.correction_type = np.arange(\n 0\n ) # For debugging : to keep track of where corrections have been made\n self.correction_dcn = np.arange(0) # For use with the lateral channels\n self.correction_met = np.arange(0) # For use with the lateral channels\n\n self.pulse = shot_no\n\n super(SignalKg1, self).__init__(constants)\n\n self.dfr = self.constants.DFR_DCN\n self.dfr_met = self.constants.DFR_MET\n\n # ------------------------\n def __deepcopy__(self, memo):\n \"\"\"\n Override deepcopy. Only a few of the attributes need to be copied.\n\n :param memo:\n\n \"\"\"\n dpcpy = self.__class__(self.constants, 0)\n\n if self.corrections.data is not None:\n dpcpy.corrections.data = self.corrections.data.copy()\n if self.corrections.time is not None:\n dpcpy.corrections.time = self.corrections.time.copy()\n if self.correction_type is not None:\n dpcpy.correction_type = self.correction_type.copy()\n if self.correction_dcn is not None:\n dpcpy.correction_dcn = self.correction_dcn.copy()\n if self.correction_met is not None:\n dpcpy.correction_met = self.correction_met.copy()\n if self.data is not None:\n dpcpy.data = self.data.copy()\n if self.time is not None:\n dpcpy.time = self.time.copy()\n dpcpy.dfr = self.dfr\n dpcpy.signal_type = self.signal_type\n dpcpy.dcn_or_met = self.dcn_or_met\n\n return dpcpy\n\n # ------------------------\n def uncorrect_fj(self, corr, index, fringe_vib=None):\n \"\"\"\n Uncorrect a fringe jump by corr, from the time corresponding to index onwards.\n Not used ATM. Will need more testing if we want to use it... 
Suspect isclose is wrong.\n 07mar2019\n used this function instead of is close as there is an issue with types and the value we are looking for\n sometimes are not found\n\n :param corr: Correction to add to the data\n :param index: Index from which to make the correction\n\n \"\"\"\n # # Check we made a correction at this time.\n\n ind_corr, value = find_nearest(self.corrections.time, self.time[index])\n # ind_corr = np.where(np.isclose(self.corrections.time, self.time[index], atol=1e-3, rtol=1e-6) == 1)\n if np.size(ind_corr) == 0:\n logger.error(\"no correction to Undo!\")\n return\n logger.log(\n 5,\n \"From index {}, time {}, subtracting {} ({} fringes)\".format(\n ind_corr, value, corr, corr / self.constants.DFR_DCN\n ),\n )\n # Uncorrect correction\n if fringe_vib is None:\n self.data[index:] = self.data[index:] + corr\n\n else:\n self.data[index:] = self.data[index:] + fringe_vib\n\n self.corrections.data = np.delete(self.corrections.data, ind_corr)\n self.corrections.time = np.delete(self.corrections.time, ind_corr)\n\n # ------------------------\n def correct_fj(\n self,\n corr,\n time=None,\n index=None,\n store=True,\n corr_dcn=None,\n corr_met=None,\n lid=None,\n ):\n \"\"\"\n Shifts all data from time onwards, or index onwards,\n down by corr. Either time or index must be specified\n\n :param corr: The correction to be subtracted\n :param time: The time from which to make the correction (if this is specified index is ignored)\n :param index: The index from which to make the correction\n :param store: To record the correction set to True\n :param corr_dcn: Only for use with lateral channels. Stores the correction,\n in terms of the number of FJ in DCN laser (as opposed to in the combined density)\n :param corr_met: Only for use with lateral channels. 
Stores the correction,\n in terms of the number of FJ in the MET laser (as opposed to the correction in the vibration)\n\n \"\"\"\n\n if time is None and index is None:\n logger.warning(\n \"No time or index was specified for making the FJ correction.\"\n )\n return\n\n if time is not None:\n index = (np.where(self.time > time),)\n if np.size(index) == 0:\n logger.warning(\n \"Could not find time near {} for making the FJ correction.\".format(\n time\n )\n )\n return\n\n index = np.min(index)\n\n if index == len(self.data):\n index = index - 1\n self.data[index:] = self.data[index:] - corr\n\n # Store correction in terms of number of fringes\n if lid is None:\n if self.dcn_or_met == \"met\":\n corr_store = int(corr / self.constants.DFR_MET)\n else:\n corr_store = int(corr / self.constants.DFR_DCN)\n else:\n corr_store = lid\n\n logger.log(\n 5,\n \"From index {}, time {}, subtracting {} \".format(\n index, self.time[index], corr\n ),\n )\n\n # If this is a mirror movement signal, store raw correction\n if \"vib\" in self.signal_type:\n corr_store = corr\n\n if store:\n # Store in terms of the number of fringes for density, or vibration itself for vibration\n if self.corrections.data is None:\n self.corrections.data = np.array([corr_store])\n self.corrections.time = np.array([self.time[index]])\n else:\n self.corrections.data = np.append(\n self.corrections.data, corr_store\n )\n self.corrections.time = np.append(\n self.corrections.time, self.time[index]\n )\n\n # Also store corresponding correction for the DCN & MET lasers (for use with lateral channels only)\n if self.dcn_or_met == \"dcn\":\n self.correction_dcn = np.append(self.correction_dcn, corr_dcn)\n\n if self.dcn_or_met == \"met\":\n self.correction_met = np.append(self.correction_met, corr_met)\n\n # now sorting corrections\n index = sorted(\n range(len(self.corrections.time)),\n key=lambda k: self.corrections.time[k],\n )\n self.corrections.data = self.corrections.data[index]\n self.corrections.time = self.corrections.time[index]\n", "sub_path": "python/models/signal_kg1.py", "file_name": "signal_kg1.py", "file_ext": "py", "file_size_in_byte": 7497, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "models.signal_base.SignalBase", "line_number": 28, "usage_type": "name"}, {"api_name": "models.signal_base.SignalBase", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 208, 
"usage_type": "call"}]} +{"seq_id": "179343230", "text": "import ccxt\nimport config\nimport schedule\nimport pandas as pd\npd.set_option('display.max_rows', None)\nimport talib\nimport pprint\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\nimport numpy as np\nfrom datetime import datetime\nimport time\n\nTrade_quantity = 0.05\npnl = 0\n\nexchange = ccxt.binance({\n \"apiKey\": config.BINANCE_API_KEY,\n \"secret\": config.BINANCE_SECRET_KEY\n})\n\n\ndef tr(data):\n data['previous_close'] = data['close'].shift(1)\n data['high-low'] = abs(data['high'] - data['low'])\n data['high-pc'] = abs(data['high'] - data['previous_close'])\n data['low-pc'] = abs(data['low'] - data['previous_close'])\n\n tr = data[['high-low', 'high-pc', 'low-pc']].max(axis=1)\n\n return tr\n\n\ndef rsi(df, RSI_PERIOD =14):\n np_closes = df['close']\n rsi = talib.RSI(np_closes, RSI_PERIOD)\n df['rsi']=rsi\n\n return rsi\n\n\ndef adx(df, ADX_PERIOD=14):\n high = df['high']\n low = df['low']\n close = df['close']\n adx = talib.ADX(high, low, close, ADX_PERIOD)\n df['adx'] = adx\n\n return adx\n\n\ndef psar(df, strategy, index):\n #e.g : real = SAR(high, low, acceleration=0, maximum=0)\n high = df['high']\n low = df['low']\n psar = talib.SAR(high, low, strategy['acceleration'], strategy['maximum'])\n df['psar'+str(index)] = psar\n \n return psar\n\n\ndef atr(data, period):\n data['tr'] = tr(data)\n atr = data['tr'].rolling(period).mean()\n \n return atr\n\ndef epsar(df,strategy):\n #e.g : real = SAREXT(high, low, startvalue=0, offsetonreverse=0, accelerationinitlong=0, accelerationlong=0, accelerationmaxlong=0, accelerationinitshort=0, accelerationshort=0, accelerationmaxshort=0)\n high = df['high']\n low = df['low']\n epsar = talib.SAREXT(high, low, strategy['start'], 0, strategy['acceleration'], strategy['acceleration'], strategy['maximum'], strategy['acceleration'], strategy['acceleration'], strategy['maximum'])\n df['epsar'] = epsar\n \n return epsar\n\ndef supertrend(df, period=7, atr_multiplier=7):\n hl2 = (df['high'] + df['low']) / 2\n df['atr'] = atr(df, period)\n df['upperband'] = hl2 + (atr_multiplier * df['atr'])\n df['lowerband'] = hl2 - (atr_multiplier * df['atr'])\n df['in_uptrend'] = True\n\n for current in range(1, len(df.index)):\n previous = current - 1\n\n if df['close'][current] > df['upperband'][previous]:\n df['in_uptrend'][current] = True\n elif df['close'][current] < df['lowerband'][previous]:\n df['in_uptrend'][current] = False\n else:\n df['in_uptrend'][current] = df['in_uptrend'][previous]\n\n if df['in_uptrend'][current] and df['lowerband'][current] < df['lowerband'][previous]:\n df['lowerband'][current] = df['lowerband'][previous]\n\n if not df['in_uptrend'][current] and df['upperband'][current] > df['upperband'][previous]:\n df['upperband'][current] = df['upperband'][previous]\n\n #print(df)\n return df\n\nin_position = False\n\ndef check_buy_sell_signals(df):\n global in_position\n global pnl\n global strategy\n global last_bought\n print(\"checking for buy and sell signals\")\n print(df.tail(5))\n last_row_index = len(df.index) - 1\n previous_row_index = last_row_index - 1\n\n shortIndicators = {}\n longIndicators = {}\n\n #if position false\n #check RSI \n if df['rsi'][last_row_index] >= 70 :\n longIndicators[\"rsi\"] = True\n shortIndicators[\"rsi\"] = False\n elif df['rsi'][last_row_index] <= 30 :\n shortIndicators[\"rsi\"] = True\n longIndicators[\"rsi\"] = False\n else:\n shortIndicators[\"rsi\"] = False\n longIndicators[\"rsi\"] = False\n \n #check PSARs\n for idx, psarStrat 
in enumerate(strategy['psar']):\n if df['psar'+str(idx)][last_row_index] < df['close'][last_row_index] :\n longIndicators[\"psar\"+str(idx)] = True\n shortIndicators[\"psar\"+str(idx)] = False\n else :\n longIndicators[\"psar\"+str(idx)] = False\n shortIndicators[\"psar\"+str(idx)] = True\n\n #check SUPERTREND\n if df['in_uptrend'][last_row_index]:\n longIndicators[\"supertrend\"] = True\n shortIndicators[\"supertrend\"] = False\n else :\n longIndicators[\"supertrend\"] = False\n shortIndicators[\"supertrend\"] = True\n \n #check ADX\n\n if df['adx'][last_row_index] >= 40 :\n shortIndicators[\"adx\"] = True\n longIndicators[\"adx\"] = True\n else:\n shortIndicators[\"adx\"] = False\n longIndicators[\"adx\"] = False\n\n # lenadx = 14 #input(14, minval=1, title=\"DI Length\")\n # lensig = 14 #(14, title=\"ADX Smoothing\", minval=1, maxval=50)\n # limadx = 18 #(18, minval=1, title=\"ADX MA Active\")\n # up = df['high'].diff()\n # down = df['low'].diff()\n # trur = talib.EMA(df['close'], lenadx)\n # plus = 100 * talib.EMA(up if (up > down).any() and (up > 0).any() else 0, lenadx) / trur\n # minus = 100 * talib.EMA(down if (down > up).any() and (down > 0).any() else 0, lenadx) / trur\n # sum = plus + minus\n # adx = 100 * talib.EMA((plus - minus) / 1 if (sum == 0).any() else sum, lensig)\n # if (adx > limadx).any() and (plus > minus).any():\n # shortIndicators[\"adx\"] = True\n # longIndicators[\"adx\"] = False\n # else:\n # if (adx > limadx).any() and (plus < minus).any():\n # shortIndicators[\"adx\"] = True\n # longIndicators[\"adx\"] = False\n # else:\n # shortIndicators[\"adx\"] = False\n # longIndicators[\"adx\"] = False\n\n \n print(\"short\",shortIndicators)\n print(\"long\",longIndicators)\n\n if not df['in_uptrend'][previous_row_index] and df['in_uptrend'][last_row_index]:\n print(\"changed to uptrend, buy\")\n if not in_position:\n order = exchange.create_market_buy_order('ETH/BUSD', Trade_quantity)\n pnl -= order['cost']\n print(order)\n print(\"Buy order\")\n else:\n print(\"already in position, nothing to do\")\n \n if df['in_uptrend'][previous_row_index] and not df['in_uptrend'][last_row_index]:\n if in_position:\n print(\"changed to downtrend, sell\")\n order = exchange.create_market_sell_order('ETH/BUSD', Trade_quantity)\n print(order)\n pnl += order['cost']\n print(\"Sell order\")\n else:\n print(\"You aren't in position, nothing to sell\")\n\n \n#print and return the balance eg(balance('BUSD','free')\ndef balance(asset, type='free'):\n print(asset,\" : \",exchange.fetch_balance().get(asset).get(type))\n\n return exchange.fetch_balance().get(asset).get(type)\n\n\n#Check if you are already on position\ndef position():\n global in_position\n if balance('ETH') >= Trade_quantity:\n in_position = True\n else:\n in_position = False\n\n print(in_position)\n return in_position\n\nclass dataframe():\n def initDatas(strategy):\n tf = strategy['timeframe']\n print(\"Class Dataframe \",tf)\n bars = exchange.fetch_ohlcv('ETH/BUSD', timeframe=tf, limit=100)\n df = pd.DataFrame(bars[:-1], columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])\n df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')\n supertrend(df,strategy['supertrend']['period'],strategy['supertrend']['atr_multiplier'])\n rsi(df,strategy['rsi']['rsi_period'])\n adx(df, strategy['adx']['adx_period'])\n epsar(df, strategy['epsar'])\n for idx, psarStrat in enumerate(strategy['psar']):\n psar(df, psarStrat, idx)\n check_buy_sell_signals(df)\n\n \ndef run_bot():\n global pnl, strategy\n position()\n\n strategy = 
{\n \"timeframe\":\"1h\",\n \"rsi\":{\n \"rsi_period\":14\n },\n \"adx\":{\n \"adx_period\":14\n },\n \"psar\":[\n {\n \"acceleration\":0.02,\n \"maximum\":0.2\n },\n {\n \"acceleration\":0.01,\n \"maximum\":0.2\n }\n ],\n \"supertrend\":{\n \"period\":7, \n \"atr_multiplier\":4\n },\n \"epsar\":{\n \"start\":0.015,\n \"acceleration\":0.01,\n \"maximum\":0.2\n }\n }\n\n #print(f\"Fetching new bars for {datetime.now().isoformat()}\")\n dataframe.initDatas(strategy)\n print(\"PNL = \", pnl)\n\n\nschedule.every(5).seconds.do(run_bot)\n\nwhile True:\n try:\n schedule.run_pending()\n time.sleep(1)\n\n except Exception as e:\n print(\"an exception occured - {}\".format(e))\n schedule.every(1).seconds.do(run_bot)\n", "sub_path": "supertrend.py", "file_name": "supertrend.py", "file_ext": "py", "file_size_in_byte": 8567, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pandas.set_option", "line_number": 5, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 11, "usage_type": "call"}, {"api_name": "ccxt.binance", "line_number": 20, "usage_type": "call"}, {"api_name": "config.BINANCE_API_KEY", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.BINANCE_SECRET_KEY", "line_number": 22, "usage_type": "attribute"}, {"api_name": "talib.RSI", "line_number": 39, "usage_type": "call"}, {"api_name": "talib.ADX", "line_number": 49, "usage_type": "call"}, {"api_name": "talib.SAR", "line_number": 59, "usage_type": "call"}, {"api_name": "talib.SAREXT", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 228, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 229, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 277, "usage_type": "call"}, {"api_name": "schedule.run_pending", "line_number": 281, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 282, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 286, "usage_type": "call"}]} +{"seq_id": "264757640", "text": "# bot.py\nimport os\nimport random\nfrom datetime import datetime\n\nfrom dotenv import load_dotenv\nfrom discord import *\nfrom discord.ext import commands\n\n# from discord_slash import SlashCommand\n# from discord_slash.utils.manage_commands import create_option\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\n# TOKEN = 'OVERRIDE'\n\nprefix = 'd!'\ngameStatus = \"\"\n\n# activity = Game(name=gameStatus)\n# activity = Streaming(name=\"c!help\", url=\"twitch_url_here\")\n# activity = Activity(type=ActivityType.listening, name=\"!help\")\n# activity = Activity(type=ActivityType.watching, name=\"!help\")\n\nbot = commands.Bot(command_prefix=prefix, status=Status.idle)\nclient = Client(intents=Intents.all())\n# slash = SlashCommand(client, sync_commands=True)\n\n# 738488607261851748 Awesome Realm Official\n# 674791516249653277 CAS Testing Server\ntestServers = [738488607261851748, \n 674791516249653277]\n\nbot_owners = [642527833410895882]\nbot_masters = [642527833410895882]\n\nforward_servers = [674791516249653277]\n\n@bot.command(name='embedtest')\nasync def embedtest(ctx):\n colorcode = int(\"0x%02x%02x%02x\" % (random.randint(0,255), random.randint(0,255), random.randint(0,255)), 16)\n testembed = Embed(title=\"Test Title\",description=\"Test Description\",color=colorcode)\n await ctx.send(embed=testembed)\n\n@client.event\nasync def on_message(message):\n guild = message.guild\n log_channel = utils.get(guild.channels, 
name=\"log-test\")\n if log_channel is None:\n await client.process_commands(message)\n return\n else:\n embed=Embed(\n color=0xffd700,\n timestamp=datetime.utcnow(),\n description=\"in {}:\\n{}\".format(message.channel.mention, message.content)\n )\n embed.set_author(name=message.author, icon_url=message.author.avatar_url)\n embed.set_footer(text=message.author.id)\n if len(message.attachments) > 0:\n embed.set_image(url = message.attachments[0].url)\n await log_channel.send(embed=embed)\n await client.process_commands(message)\n\nbot.run(TOKEN)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2061, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "discord.ext.commands.Bot", "line_number": 25, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 25, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "338752313", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('portfolio', '0009_auto_20141014_0325'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Collections',\n fields=[\n ('col_id', models.AutoField(serialize=False, primary_key=True)),\n ('collection', models.CharField(max_length=200)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterField(\n model_name='gallery',\n name='collection',\n field=models.ForeignKey(to='portfolio.Collections'),\n ),\n migrations.AlterField(\n model_name='gallery',\n name='post_desc',\n field=models.CharField(max_length=500, null=True, blank=True),\n ),\n ]\n", "sub_path": "portfolio/migrations/0010_auto_20141014_0343.py", "file_name": "0010_auto_20141014_0343.py", "file_ext": "py", "file_size_in_byte": 934, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", 
"line_number": 29, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "416070607", "text": "import pandas as pd\nimport numpy as np\nimport tkinter as tk\nfrom tkinter import *\nimport quandl\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\n\n\n\nclass bollinger:\n def __init__(self,sname,data):\n self.sname=sname\n self.data=data\n root=tk.Tk()\n self.master=root\n root.title(\"graph of Stock\")\n root.geometry(\"500x600\")\n self.plotgraph()\n root.mainloop()\n \n def plotgraph1(self,a):\n rm=self.data.rolling(window=5,center=False).mean()\n rstd=self.data.rolling(window=5,center=False).std()\n upper_band=rm+2*rstd\n lower_band=rm-2*rstd\n a.plot(self.data,label=\"Open values\")\n a.plot(upper_band,label=\"Bollinger upper band\")\n a.plot(lower_band,label=\"Bollinger lower band\")\n a.set_xlabel(\"Date\")\n a.set_ylabel(\"Price\")\n a.set_title(self.sname)\n a.legend(loc='upper left') \n \n def plotgraph(self):\n f = Figure(figsize=(5,5), dpi=100)\n a = f.add_subplot(111)\n self.plotgraph1(a)\n canvas = FigureCanvasTkAgg(f, self.master)\n canvas.show()\n canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True)\n\n toolbar = NavigationToolbar2TkAgg(canvas, self.master)\n toolbar.update()\n canvas._tkcanvas.pack(fill=BOTH)\n\n", "sub_path": "file98.py", "file_name": "file98.py", "file_ext": "py", "file_size_in_byte": 1438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "matplotlib.use", "line_number": 7, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.figure.Figure", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_tkagg.NavigationToolbar2TkAgg", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "168862784", "text": "from PIL import Image\nimport os\nimport random\n\n\ndef get_sample():\n \"\"\" Get a random image of a sudoku from\n the directory sudoku_images\"\"\"\n num = random.randint(1, get_ammount())\n filename = f\"sudoku{num}.jpg\"\n return Image.open(filename)\n\n\ndef get_ammount():\n list = os.listdir()\n return len(list) - 2\n", "sub_path": "sudoku_images/images.py", "file_name": "images.py", "file_ext": "py", "file_size_in_byte": 324, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "random.randint", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 11, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 11, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "275850793", "text": "from app.gui import add_transaction\nfrom app import entity_dialog\nfrom app.db.models import Transactions, Entities\nfrom app.db import DatabaseHelper\n\nimport datetime\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n#DATABASE IMPORTS\n\nclass Main(add_transaction.Ui_Dialog):\n def __init__(self):\n self.db = DatabaseHelper()\n self.curr_month = self.db.get_current_month()\n\n def 
launch(self, Dialog, l_type):\n self.Dialog = Dialog\n self.l_type = l_type\n self.setupUi(Dialog)\n\n self.set_connections()\n self.set_header()\n self.dateTimeEdit.setDateTime(datetime.datetime.now())\n self.load_types_combo()\n\n if self.l_type == Transactions.LAUNCH_EDIT:\n self.populate_fields()\n\n Dialog.exec()\n\n def launch_edit(self, Dialog, transaction_id):\n self.transaction = self.db.get_transaction(transaction_id)\n self.launch(Dialog, Transactions.LAUNCH_EDIT)\n\n def populate_fields(self):\n if self.transaction.from_entity.type == Entities.TYPE_INCOME:\n self.typeCombo.setCurrentIndex(2)\n elif self.transaction.to_entity.type == Entities.TYPE_EXPENSE:\n self.typeCombo.setCurrentIndex(0)\n\n self.amountSpinBox.setValue(self.transaction.value)\n\n for i in range(0, len(self.from_objects)):\n e_object = self.from_objects[i]\n if e_object.id == self.transaction.from_entity.id:\n self.fromCombo.setCurrentIndex(i)\n\n for i in range(0, len(self.to_objects)):\n e_object = self.to_objects[i]\n if e_object.id == self.transaction.to_entity.id:\n self.toCombo.setCurrentIndex(i)\n\n self.commentLineEdit.setText(self.transaction.comment)\n\n t_date = datetime.datetime.fromtimestamp(self.transaction.timestamp)\n self.dateTimeEdit.setDateTime(t_date)\n\n\n def load_types_combo(self):\n self.typeCombo.addItem('Expense')\n self.typeCombo.addItem('Transfer')\n self.typeCombo.addItem('Income')\n\n def load_combos(self):\n\n self.fromCombo.clear()\n self.toCombo.clear()\n\n self.from_objects = []\n self.to_objects = []\n\n if self.typeCombo.currentText() == 'Income':\n incomes = self.db.get_entities(Entities.TYPE_INCOME, self.curr_month)\n assets = self.db.get_entities(Entities.TYPE_ASSET, self.curr_month)\n\n for i in range(0, len(incomes)):\n income = incomes[i]\n self.fromCombo.addItem(income.name)\n self.from_objects.append(income)\n\n for i in range(0, len(assets)):\n asset = assets[i]\n self.toCombo.addItem(asset.name)\n self.to_objects.append(asset)\n\n elif self.typeCombo.currentText() == 'Expense':\n assets = self.db.get_entities(Entities.TYPE_ASSET, self.curr_month)\n expenses = self.db.get_entities(Entities.TYPE_EXPENSE, self.curr_month)\n\n for i in range(0, len(assets)):\n asset = assets[i]\n self.fromCombo.addItem(asset.name)\n self.from_objects.append(asset)\n\n for i in range(0, len(expenses)):\n expense = expenses[i]\n self.toCombo.addItem(expense.name)\n self.to_objects.append(expense)\n\n elif self.typeCombo.currentText() == 'Transfer':\n assets = self.db.get_entities(Entities.TYPE_ASSET, self.curr_month)\n for i in range(0, len(assets)):\n asset = assets[i]\n\n self.fromCombo.addItem(asset.name)\n self.from_objects.append(asset)\n\n self.toCombo.addItem(asset.name)\n self.to_objects.append(asset)\n\n def save_transaction(self):\n transaction = Transactions()\n\n transaction.from_entity = self.from_objects[self.fromCombo.currentIndex()]\n transaction.to_entity = self.to_objects[self.toCombo.currentIndex()]\n transaction.value = float(self.amountSpinBox.value())\n transaction.comment = self.commentLineEdit.text()\n # transaction.timestamp = int(datetime.datetime.now().timestamp()) - int( self.daysbackSpinBox.value() * 24 * 60 * 60)\n transaction.timestamp = int(self.dateTimeEdit.dateTime().toPyDateTime().timestamp())\n\n if self.l_type == Transactions.LAUNCH_ADD:\n self.db.add_transaction(transaction)\n elif self.l_type == Transactions.LAUNCH_EDIT:\n transaction.id = self.transaction.id\n self.db.update_transaction(transaction)\n self.Dialog.close()\n \n def 
set_header(self):\n        string = '{} Transaction
'\n\n if self.l_type == Transactions.LAUNCH_ADD:\n self.headerlabel.setText(string.format('Add'))\n elif self.l_type == Transactions.LAUNCH_EDIT:\n self.headerlabel.setText(string.format('Edit'))\n else:\n self.headerlabel.setText(string.format('Eh?')) \n \n\n def set_connections(self):\n self.Dialog.keyPressEvent = self.keyPressEvent\n self.typeCombo.currentIndexChanged.connect(self.load_combos)\n self.saveButton.clicked.connect(self.save_transaction)\n\n def keyPressEvent(self, e):\n if e.key() == QtCore.Qt.Key_Escape:\n self.Dialog.close() \n ", "sub_path": "app/add_transaction.py", "file_name": "add_transaction.py", "file_ext": "py", "file_size_in_byte": 5407, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "app.gui.add_transaction.Ui_Dialog", "line_number": 12, "usage_type": "attribute"}, {"api_name": "app.gui.add_transaction", "line_number": 12, "usage_type": "name"}, {"api_name": "app.db.DatabaseHelper", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "app.db.models.Transactions.LAUNCH_EDIT", "line_number": 27, "usage_type": "attribute"}, {"api_name": "app.db.models.Transactions", "line_number": 27, "usage_type": "name"}, {"api_name": "app.db.models.Transactions.LAUNCH_EDIT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "app.db.models.Transactions", "line_number": 34, "usage_type": "name"}, {"api_name": "app.db.models.Entities.TYPE_INCOME", "line_number": 37, "usage_type": "attribute"}, {"api_name": "app.db.models.Entities", "line_number": 37, "usage_type": "name"}, {"api_name": "app.db.models.Entities.TYPE_EXPENSE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "app.db.models.Entities", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "attribute"}, {"api_name": "app.db.models.Entities.TYPE_INCOME", "line_number": 74, "usage_type": "attribute"}, {"api_name": "app.db.models.Entities", "line_number": 74, "usage_type": "name"}, {"api_name": "app.db.models.Entities.TYPE_ASSET", "line_number": 75, "usage_type": "attribute"}, {"api_name": "app.db.models.Entities", "line_number": 75, "usage_type": "name"}, {"api_name": "app.db.models.Entities.TYPE_ASSET", "line_number": 88, "usage_type": "attribute"}, {"api_name": "app.db.models.Entities", "line_number": 88, "usage_type": "name"}, {"api_name": "app.db.models.Entities.TYPE_EXPENSE", "line_number": 89, "usage_type": "attribute"}, {"api_name": "app.db.models.Entities", "line_number": 89, "usage_type": "name"}, {"api_name": "app.db.models.Entities.TYPE_ASSET", "line_number": 102, "usage_type": "attribute"}, {"api_name": "app.db.models.Entities", "line_number": 102, "usage_type": "name"}, {"api_name": "app.db.models.Transactions", "line_number": 113, "usage_type": "call"}, {"api_name": "app.db.models.Transactions.LAUNCH_ADD", "line_number": 122, "usage_type": "attribute"}, {"api_name": "app.db.models.Transactions", "line_number": 122, "usage_type": "name"}, {"api_name": "app.db.models.Transactions.LAUNCH_EDIT", "line_number": 124, "usage_type": "attribute"}, {"api_name": "app.db.models.Transactions", "line_number": 124, "usage_type": "name"}, {"api_name": "app.db.models.Transactions.LAUNCH_ADD", "line_number": 132, "usage_type": 
"attribute"}, {"api_name": "app.db.models.Transactions", "line_number": 132, "usage_type": "name"}, {"api_name": "app.db.models.Transactions.LAUNCH_EDIT", "line_number": 134, "usage_type": "attribute"}, {"api_name": "app.db.models.Transactions", "line_number": 134, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 146, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 146, "usage_type": "name"}]} +{"seq_id": "350883699", "text": "import os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport torch \nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\n\nroot = './data'\nif not os.path.exists(root):\n os.mkdir(root)\n\nimg = './results/convolutional'\nif not os.path.exists(img):\n os.mkdir(img)\n\nlearning_rate = 1e-3\nbatch_size = 128\nnum_epochs = range(50)\n\ntrans = transforms.Compose([transforms.ToTensor()])\ntrain_set = dset.MNIST(root=root, train=True, transform=trans, download=True)\n\ntrain_loader = torch.utils.data.DataLoader(\n dataset=train_set,\n batch_size=batch_size,\n shuffle=True)\n\nclass AutoEncoder(nn.Module):\n def __init__(self):\n super(AutoEncoder, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv2d(1, 16, 3, stride=3, padding=1), # b, 16, 10, 10\n nn.ReLU(True),\n nn.MaxPool2d(2, stride=2), # b, 16, 5, 5\n nn.Conv2d(16, 8, 3, stride=2, padding=1), # b, 8, 3, 3\n nn.ReLU(True),\n nn.MaxPool2d(2, stride=1) # b, 8, 2, 2\n )\n self.decoder = nn.Sequential(\n nn.ConvTranspose2d(8, 16, 3, stride=2), # b, 16, 5, 5\n nn.ReLU(True),\n nn.ConvTranspose2d(16, 8, 5, stride=3, padding=1), # b, 8, 15, 15\n nn.ReLU(True),\n nn.ConvTranspose2d(8, 1, 2, stride=2, padding=1), # b, 1, 28, 28\n nn.Tanh()\n )\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x\n\n def name(self):\n return \"ConvAutoEncoder\"\n\nmodel = AutoEncoder()\nuse_cuda = torch.cuda.is_available()\nif use_cuda:\n model = model.cuda()\n\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)\n\ndef mse_loss(input, target):\n return torch.sum((input - target) ** 2)\n\nprint(\"Training\")\nfor epoch in num_epochs:\n\n # Training\n print(\"Epoch:\", epoch)\n for batch_idx, (x, target) in enumerate(train_loader):\n\n optimizer.zero_grad()\n\n if use_cuda:\n x = x.cuda()\n\n x = Variable(x)\n output = model(x)\n\n loss = mse_loss(output, x)\n\n loss.backward()\n optimizer.step()\n\n if (batch_idx+1) % 100 == 0 or (batch_idx+1) == len(train_loader):\n print(\"Batch Index: {}, Train Loss: {:.6f}\".format(batch_idx+1, loss))\n \n print(\"Saving last example\")\n fig, (ax1, ax2) = plt.subplots(1, 2)\n\n input = x.detach()\n out = model(x).detach()\n\n if use_cuda:\n input = input.cpu()\n out = out.cpu()\n\n ax1.imshow(np.squeeze(input[0, :, :, :]))\n ax2.imshow(np.squeeze(out[0, :, :, :]))\n\n plt.savefig(img + \"/train_target_{}_epoch_{}.png\".format(target[0], epoch))\n\n# Save Model\ntorch.save(model.state_dict(), img + \"/\" + model.name())\n\n \n", "sub_path": "convolutional_autoencoder.py", "file_name": "convolutional_autoencoder.py", "file_ext": "py", "file_size_in_byte": 2910, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.path.exists", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 14, 
"usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 18, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 95, "usage_type": "name"}, {"api_name": "numpy.squeeze", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "647499502", "text": "import os\nimport sys\nimport numpy as np\nimport renom as rm\nfrom tqdm import tqdm\nfrom renom_img.api.utility.load import load_img\n\nfrom renom_img.api import Base\nfrom renom_img.api.utility.target import DataBuilderClassification\nfrom renom_img.api.utility.distributor.distributor import ImageDistributor\nfrom renom_img.api.utility.exceptions.exceptions import FunctionNotImplementedError\n\n\nclass Detection(Base):\n\n def preprocess(self, x):\n return x / 255.\n\n def predict(self, img_list, batch_size=1, score_threshold=0.3, nms_threshold=0.4):\n \"\"\"\n This method accepts an array of image paths, list of image paths, or a path to an image.\n\n Args:\n img_list (string, list, ndarray): Path to an image, list of path or ndarray.\n score_threshold (float): The threshold for the confidence score.\n Predicted boxes that have a lower confidence score than the threshold are discarded.\n The default is 0.3.\n nms_threshold (float): The threshold for non maximum supression. The default is 0.4.\n\n Return:\n (list): List of predicted bbox, score and class of each image.\n The format of the return value is shown below. Box coordinates and size will be returned as\n ratios to the original image size. Therefore, the values of 'box' are in the range [0 ~ 1].\n\n .. code-block :: python\n\n # An example of a return value.\n [\n [ # Prediction for first image.\n {'box': [x, y, w, h], 'score':(float), 'class':(int), 'name':(str)},\n {'box': [x, y, w, h], 'score':(float), 'class':(int), 'name':(str)},\n ...\n ],\n [ # Prediction for second image.\n {'box': [x, y, w, h], 'score':(float), 'class':(int), 'name':(str)},\n {'box': [x, y, w, h], 'score':(float), 'class':(int), 'name':(str)},\n ...\n ],\n ...\n ]\n\n Example:\n >>>\n >>> model.predict(['img01.jpg', 'img02.jpg']])\n [[{'box': [0.21, 0.44, 0.11, 0.32], 'score':0.823, 'class':1, 'name':'dog'}],\n [{'box': [0.87, 0.38, 0.84, 0.22], 'score':0.423, 'class':0, 'name':'cat'}]]\n\n Note:\n Box coordinates and size will be returned as ratios to the original image size.\n Therefore, the values of 'box' are in the range [0 ~ 1].\n\n \"\"\"\n self.set_models(inference=True)\n if isinstance(img_list, (list, str)):\n img_builder = self.build_data()\n if isinstance(img_list, (tuple, list)):\n results = []\n bar = tqdm()\n bar.total = int(np.ceil(len(img_list) / batch_size))\n for batch_num in range(0, len(img_list), batch_size):\n results.extend(self.get_bbox(self(img_builder(img_path_list=img_list[batch_num:batch_num + batch_size])).as_ndarray(),\n score_threshold,\n nms_threshold))\n bar.update(1)\n bar.close()\n return results\n else:\n return self.get_bbox(self(img_builder(img_path_list=[img_list])).as_ndarray(), score_threshold, nms_threshold)[0]\n else:\n img_array = img_list\n return self.get_bbox(self(img_array).as_ndarray(),\n score_threshold,\n nms_threshold)\n\n def loss(self, x, y):\n \"\"\"\n Loss function of ${class} algorithm.\n\n Args:\n x(ndarray, Node): Output of model.\n y(ndarray, Node): Target array.\n\n Returns:\n (Node): Loss between x and y.\n Example:\n >>> builder = 
model.build_data() # This will return a builder function.\n >>> x, y = builder(image_path_list, annotation_list)\n >>> z = model(x)\n >>> loss = model.loss(z, y)\n \"\"\"\n raise FunctionNotImplementedError(\n \"The loss function has not been implemented for the {} class.\".format(self.__class__))\n\n def build_data(self):\n \"\"\"\n This function returns a function which creates input data and target data\n specified for ${class}.\n\n Returns:\n (function): Returns function which creates input data and target data.\n\n Example:\n >>> builder = model.build_data() # This will return a builder function.\n >>> x, y = builder(image_path_list, annotation_list)\n >>> z = model(x)\n >>> loss = model.loss(z, y)\n \"\"\"\n raise FunctionNotImplementedError(\n \"The build_data function has not been implemented for the {} class.\".format(self.__class__))\n", "sub_path": "renom_img/api/detection/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 4924, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "renom_img.api.Base", "line_number": 14, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 69, "usage_type": "call"}, {"api_name": "renom_img.api.utility.exceptions.exceptions.FunctionNotImplementedError", "line_number": 101, "usage_type": "call"}, {"api_name": "renom_img.api.utility.exceptions.exceptions.FunctionNotImplementedError", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "47676604", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport pytest\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.checkpoint import checkpoint\n\nfrom colossalai.context.parallel_mode import ParallelMode\nfrom colossalai.context.random import add_seed, seed, set_mode\nfrom colossalai.utils import checkpoint\n\n\ndef forward(x, weight):\n out = torch.matmul(x, weight)\n with seed(ParallelMode.DATA):\n out_ = F.dropout(out, p=0.4, training=True)\n return out_\n\n\n@pytest.mark.gpu\ndef test_activation_checkpointing():\n add_seed(ParallelMode.GLOBAL, 1024)\n set_mode(ParallelMode.GLOBAL)\n global_cuda_rng_state = torch.cuda.get_rng_state()\n add_seed(ParallelMode.DATA, 1026)\n set_mode(ParallelMode.DATA)\n data_parallel_cuda_rng_state = torch.cuda.get_rng_state()\n set_mode(ParallelMode.GLOBAL)\n\n # normal\n data = torch.rand(2, 2, requires_grad=True).cuda()\n data.retain_grad()\n weight = torch.rand(2, 4, requires_grad=True).cuda()\n\n data_ = data.clone().detach()\n data_.requires_grad = True\n data_.retain_grad()\n weight_ = weight.clone().detach()\n weight_.requires_grad = True\n\n out = forward(data, weight)\n loss = out.sum()\n loss.backward()\n\n # checkpoint\n set_mode(ParallelMode.GLOBAL)\n torch.cuda.set_rng_state(global_cuda_rng_state)\n set_mode(ParallelMode.DATA)\n torch.cuda.set_rng_state(data_parallel_cuda_rng_state)\n set_mode(ParallelMode.GLOBAL)\n out = checkpoint(forward, data_, weight_)\n loss = out.sum()\n loss.backward()\n\n assert torch.all(data.grad == data_.grad), 'Gradient of the input does not match'\n\n\nif __name__ == '__main__':\n test_activation_checkpointing()\n", "sub_path": "tests/test_utils/test_activation_checkpointing.py", "file_name": "test_activation_checkpointing.py", "file_ext": "py", "file_size_in_byte": 1681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "torch.matmul", "line_number": 
15, "usage_type": "call"}, {"api_name": "colossalai.context.random.seed", "line_number": 16, "usage_type": "call"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode.DATA", "line_number": 16, "usage_type": "attribute"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 17, "usage_type": "name"}, {"api_name": "colossalai.context.random.add_seed", "line_number": 23, "usage_type": "call"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode.GLOBAL", "line_number": 23, "usage_type": "attribute"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode", "line_number": 23, "usage_type": "name"}, {"api_name": "colossalai.context.random.set_mode", "line_number": 24, "usage_type": "call"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode.GLOBAL", "line_number": 24, "usage_type": "attribute"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.cuda.get_rng_state", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 25, "usage_type": "attribute"}, {"api_name": "colossalai.context.random.add_seed", "line_number": 26, "usage_type": "call"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode.DATA", "line_number": 26, "usage_type": "attribute"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode", "line_number": 26, "usage_type": "name"}, {"api_name": "colossalai.context.random.set_mode", "line_number": 27, "usage_type": "call"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode.DATA", "line_number": 27, "usage_type": "attribute"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.cuda.get_rng_state", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 28, "usage_type": "attribute"}, {"api_name": "colossalai.context.random.set_mode", "line_number": 29, "usage_type": "call"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode.GLOBAL", "line_number": 29, "usage_type": "attribute"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.rand", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 34, "usage_type": "call"}, {"api_name": "colossalai.context.random.set_mode", "line_number": 47, "usage_type": "call"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode.GLOBAL", "line_number": 47, "usage_type": "attribute"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.cuda.set_rng_state", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 48, "usage_type": "attribute"}, {"api_name": "colossalai.context.random.set_mode", "line_number": 49, "usage_type": "call"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode.DATA", "line_number": 49, "usage_type": "attribute"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.cuda.set_rng_state", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 50, "usage_type": "attribute"}, {"api_name": "colossalai.context.random.set_mode", "line_number": 51, "usage_type": "call"}, {"api_name": 
"colossalai.context.parallel_mode.ParallelMode.GLOBAL", "line_number": 51, "usage_type": "attribute"}, {"api_name": "colossalai.context.parallel_mode.ParallelMode", "line_number": 51, "usage_type": "name"}, {"api_name": "colossalai.utils.checkpoint", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.all", "line_number": 56, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "413214407", "text": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Reads Training Start/End Time from Events File.\"\"\"\nimport datetime\n\nfrom absl import flags\n\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'model_dir', default=None,\n help=('The directory where the model and training/evaluation summaries are'\n ' stored.'))\n\nflags.DEFINE_integer(\n 'warmup_steps', default=None, help='Number of warmup steps taken.')\n\n\ndef main(unused_argv):\n if not FLAGS.model_dir:\n raise ValueError('--model_dir must be specified.')\n\n if not FLAGS.warmup_steps:\n raise ValueError('--warmup_steps must be non-zero.')\n\n target_step = FLAGS.warmup_steps\n current_step = 0\n max_wall_time = 0.0\n event_file = tf.gfile.Glob(FLAGS.model_dir + '/events.out.tfevents.*.n-*')[0]\n\n for e in tf.train.summary_iterator(event_file):\n for v in e.summary.value:\n if v.tag == 'loss':\n current_step += 1\n if current_step == target_step:\n print('training start (step %d): %s' %\n (current_step, datetime.datetime.fromtimestamp(\n e.wall_time).strftime('%Y-%m-%d %H:%M:%S.%f')))\n max_wall_time = max(e.wall_time, max_wall_time)\n\n print('training end (step %d): %s' %\n (current_step, datetime.datetime.fromtimestamp(\n max_wall_time).strftime('%Y-%m-%d %H:%M:%S.%f')))\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n", "sub_path": "models/official/resnet/benchmark/read_training_time.py", "file_name": "read_training_time.py", "file_ext": "py", "file_size_in_byte": 2085, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "absl.flags.FLAGS", "line_number": 22, "usage_type": "attribute"}, {"api_name": "absl.flags", "line_number": 22, "usage_type": "name"}, {"api_name": "absl.flags.DEFINE_string", "line_number": 24, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 24, "usage_type": "name"}, {"api_name": "absl.flags.DEFINE_integer", "line_number": 29, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 29, "usage_type": "name"}, {"api_name": "tensorflow.gfile.Glob", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.train.summary_iterator", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 45, "usage_type": "attribute"}, 
{"api_name": "datetime.datetime.fromtimestamp", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.set_verbosity", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.app.run", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "577666840", "text": "import os\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom share import data_format\nimport random\nimport json\nimport pandas as pd\n\n\n\nmodel_path=r\"D:\\data\\share\\model\\test\"\n\n\ndef get_data():\n df=data_format.Data_Format()\n # df.get_train_and_result_data()\n # return get_train_data(0,new_data,7,1,[],[])\n\n\n\ndef get_train_data(num,data,input_days,out_days,train_list,result_list):\n if num + input_days+out_days>=len(data):\n return train_list,result_list\n else:\n i=data[num:num + input_days]\n if out_days==1:\n l = data[num + input_days + out_days]\n else:\n l=data[num + input_days:num+input_days+out_days]\n train_list.append(i)\n result_list.append(l)\n return get_train_data(num +1,data,input_days,out_days,train_list,result_list)\n\ndef get_model():\n #输入\n lstm_input = tf.keras.Input(shape=(7,6), name='lstm_input')\n ts_code_input = tf.keras.Input(shape=(1) ,name='ts_code_input')\n area_input = tf.keras.Input(shape=(1), name='area_input')\n industry_input = tf.keras.Input(shape=(1), name='industry_input')\n #与输入对接的lstm和embedding\n # lstm=tf.keras.layers.CuDNNLSTM(units=256, input_shape=(7, 6))(lstm_input)\n lstm=tf.keras.layers.LSTM(units=256, input_shape=(7, 6))(lstm_input)\n area_e=tf.keras.layers.Embedding(input_dim=6000,output_dim=256)(ts_code_input)\n ts_code_e=tf.keras.layers.Embedding(input_dim=6000,output_dim=256)(area_input)\n industry_e=tf.keras.layers.Embedding(input_dim=6000,output_dim=256)(industry_input)\n\n\n a=tf.keras.backend.concatenate([area_e,ts_code_e,industry_e])\n info_out=tf.keras.layers.Dense(units=256)(a)\n lstm_reshape=tf.keras.layers.Reshape((1,256))(lstm)\n b = tf.keras.backend.concatenate([lstm_reshape,info_out])\n out1 = tf.keras.layers.Dense(units=256)(b)\n out=tf.keras.layers.Dense(units=2,activation=\"softmax\",name=\"out\")(out1)\n # out=tf.keras.layers.Dense(units=2,activation=\"softmax\",name=\"out\")(lstm_reshape)\n\n # print(out.shape)\n model=tf.keras.Model(inputs=[lstm_input,ts_code_input,area_input,industry_input],outputs=out)\n # model=tf.keras.Model(inputs=lstm_input,outputs=out)\n\n model.compile(optimizer=tf.train.RMSPropOptimizer(0.0005),\n loss=tf.keras.losses.categorical_crossentropy,\n metrics=[tf.keras.metrics.categorical_accuracy]\n )\n return model\n\n\ndef main():\n model=get_model()\n train_data=np.load(r\"D:\\data\\share\\train_0913.npy\")\n result_data=np.load(r\"D:\\data\\share\\verify_0913.npy\")\n train_data=np.array(train_data)\n result_data=np.array(result_data)\n # model.build((None,7,6))\n model.summary()\n model.fit(train_data,result_data,nb_epoch=100,batch_size=10,verbose=1)\n model.save(model_path)\n\n\n# def predict(train_data):\n# model=get_model()\n# 
model.load_weights(model_path)\n# result=model.predict(train_data)\n# return result\n\n\nclass Predict():\n def __init__(self):\n self.names=data_format.Redis_Name_Manager()\n self.Data_Format=data_format.Data_Format()\n\n\n\n def predict(self,ts_code,date=None):\n share_info = self.Data_Format.get_share_data(ts_code)\n if share_info==None:\n return None,None\n share_info = json.loads(share_info)\n ids = share_info[data_format.IDS_NAME]\n if date==None:\n train_data, date = self.Data_Format.get_current_data(ts_code)\n # print(train_data,date)\n train_data = np.array(train_data).reshape((1, 7, 6))\n real_data = None\n else:\n train_data, real_data = self.Data_Format.get_train_and_result_data(ts_code, date)\n train_data = np.array(train_data).reshape((1, 7, 6))\n\n model=get_model()\n model=load_model(model)\n predict_data=model.predict({'lstm_input': train_data, 'ts_code_input': np.array([int(ids[0])]),\n 'area_input': np.array([int(ids[1])]), 'industry_input': np.array([int(ids[2])])})\n self.Data_Format.save_predict(ts_code,date,predict_data=predict_data.tolist())\n print(\"=======\",predict_data.shape,predict_data[0][0][0]>predict_data[0][0][1])\n return predict_data,real_data\n\n def predict_all(self):\n for ts_code in self.Data_Format.get_share_list()[0]:\n print(\"-------------\",ts_code)\n predict_data,_=self.predict(ts_code)\n print(type(predict_data))\n if type(predict_data)!=type(None):\n if predict_data[0][0][0]0:\n rxnlist2.append(sfmodel.reactions.get_by_id(rxn.id+\"_reverse\"))\n rxnlist2.append(sfmodel.reactions.get_by_id(rxn.id))\n #print(\"Rxn list =\"+str(rxnlist2))\n print(\"Running FVA\")\n \n if solver != \"\":\n import optlang\n if optlang.available_solvers.keys().__contains__(solver) and optlang.available_solvers[solver]:\n sfmodel.solver=solver\n else:\n print(\"Requested solver \"+solver+\" not available, using current model solver...\")\n fva = flux_analysis.flux_variability_analysis(sfmodel,reaction_list = rxnlist2)\n print(\"Processing results\")\n \n fva2=dict()\n for mode in fva.keys():\n if mode == \"maximum\":\n tempdict = dict()\n FVArxnSet = set()\n for rxn in fva[mode].keys():\n if rxn.__contains__(\"_reverse\"):\n rxn = rxn.replace(\"_reverse\",\"\")\n if FVArxnSet.__contains__(rxn):\n continue\n FVArxnSet.add(rxn)\n if not fva[mode].keys().__contains__(rxn+\"_reverse\"):\n maxi = fva[mode][rxn]\n else:\n maxi = fva[mode][rxn]+fva[mode][rxn+\"_reverse\"]\n tempdict[rxn]=maxi\n else:\n tempdict=dict()\n FVArxnSet = set()\n for rxn in fva[mode].keys():\n if rxn.__contains__(\"_reverse\"):\n rxn = rxn.replace(\"_reverse\",\"\")\n if FVArxnSet.__contains__(rxn):\n continue\n FVArxnSet.add(rxn)\n if not fva[mode].keys().__contains__(rxn+\"_reverse\"):\n mini = fva[mode][rxn]\n else:\n mini = fva[mode][rxn]+fva[mode][rxn+\"_reverse\"]\n tempdict[rxn]=mini\n fva2[mode]=tempdict\n \n sfmodel.fva = fva\n cobra_model.fva = fva2\n return cobra_model\n", "sub_path": "SweetloveGroup/FVA.py", "file_name": "FVA.py", "file_ext": "py", "file_size_in_byte": 2822, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "cobra.flux_analysis.parsimonious.pfba", "line_number": 16, "usage_type": "call"}, {"api_name": "cobra.flux_analysis.parsimonious", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cobra.flux_analysis", "line_number": 16, "usage_type": "name"}, {"api_name": "SweetloveGroup.transform.rev2irrev", "line_number": 25, "usage_type": "call"}, {"api_name": 
"SweetloveGroup.constraints.constrainSumOfFluxes", "line_number": 27, "usage_type": "call"}, {"api_name": "optlang.available_solvers.keys", "line_number": 41, "usage_type": "call"}, {"api_name": "optlang.available_solvers", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cobra.flux_analysis.flux_variability_analysis", "line_number": 45, "usage_type": "call"}, {"api_name": "cobra.flux_analysis", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "541096697", "text": "from __future__ import (\n absolute_import,\n unicode_literals,\n)\n\nimport redis\nimport six\n\nfrom pysoa.common.transport.redis_gateway.backend.base import BaseRedisClient\n\n\nclass StandardRedisClient(BaseRedisClient):\n def __init__(self, hosts=None, connection_kwargs=None):\n self._hosts = self._setup_hosts(hosts)\n self._connection_list = [redis.Redis.from_url(host, **(connection_kwargs or {})) for host in self._hosts]\n\n super(StandardRedisClient, self).__init__(ring_size=len(self._hosts))\n\n @staticmethod\n def _setup_hosts(hosts):\n if not hosts:\n hosts = [('localhost', 6379)]\n\n if isinstance(hosts, six.string_types):\n raise ValueError('Redis hosts must be specified as an iterable list of hosts.')\n\n final_hosts = list()\n for entry in hosts:\n if isinstance(entry, six.string_types):\n final_hosts.append(entry)\n else:\n final_hosts.append('redis://{name}:{port:d}/0'.format(name=entry[0], port=entry[1]))\n return final_hosts\n\n def _get_connection(self, index=None):\n # If index is explicitly None, pick a random server\n if index is None:\n index = self._get_random_index()\n # Catch bad indexes\n if not 0 <= index < self._ring_size:\n raise ValueError(\n 'There are only {count} hosts, but you asked for connection {index}.'.format(\n count=self._ring_size,\n index=index,\n )\n )\n return self._connection_list[index]\n", "sub_path": "pysoa/common/transport/redis_gateway/backend/standard.py", "file_name": "standard.py", "file_ext": "py", "file_size_in_byte": 1595, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pysoa.common.transport.redis_gateway.backend.base.BaseRedisClient", "line_number": 12, "usage_type": "name"}, {"api_name": "redis.Redis.from_url", "line_number": 15, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 15, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 24, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "378923266", "text": "import pathlib\nfrom typing import Optional, Union, List\nimport requests\nimport pandas as pd\n\nAPI_URL = 'https://www.metaweather.com/api/'\n\n\ndef get_city_data(\n woeid: int, year: int, month: int,\n path: Optional[Union[str, pathlib.Path]] = None,\n timeout: float = 5.\n) -> (str, List[str]):\n\n _file_paths = []\n data = []\n woeid = str(woeid)\n year = str(year)\n if month < 10:\n month = '0' + str(month)\n else:\n month = str(month)\n\n city_year_month = woeid + '_' + year + '_' + month\n\n if path:\n _dir_path = pathlib.Path.cwd() / path / city_year_month\n\n else:\n _dir_path = pathlib.Path.cwd() / city_year_month\n\n try:\n pathlib.Path.mkdir(_dir_path)\n except FileExistsError as err:\n print(err)\n\n for day in range(1, 32):\n\n if day < 10:\n day = '0' + str(day)\n else:\n day = str(day)\n\n url = API_URL + 'location/' + woeid + '/' + year + '/' + month + '/' + day\n try:\n\n response = requests.get(url, timeout=timeout)\n if response.status_code 
!= 200:\n raise requests.exceptions.HTTPError\n\n response = response.json()\n print(f'Downloaded{day}')\n\n _file_paths.append(_dir_path / city_year_month)\n\n data.append(response[0])\n\n except RuntimeError as err:\n raise err\n frame = pd.DataFrame(data)\n frame.set_index('created')\n frame = frame[['created', 'min_temp', 'the_temp', 'max_temp', 'air_pressure', 'humidity', 'visibility',\n 'wind_direction_compass', 'wind_direction', 'wind_speed']]\n frame['created'] = frame['created'].apply(lambda x: str(x)[:16])\n frame = frame.rename({'the_temp': 'temp'}, axis=1)\n frame.to_csv(str(_dir_path) + '/' + (city_year_month + '.csv'), index=False)\n\n return _dir_path, _file_paths\n\n\nif __name__ == '__main__':\n _path = pathlib.Path.cwd()\n expected_path = _path / '523920_2017_03'\n dir_path, file_paths = get_city_data(523920, 2017, 3)\n assert len(file_paths) == 31\n assert pathlib.Path(dir_path).is_dir()\n assert expected_path == dir_path\n\n expected_path = 'weather_data/523920_2017_03'\n dir_path, file_paths = get_city_data(523920, 2017, 3, path='weather_data')\n assert len(file_paths) == 31\n assert pathlib.Path(dir_path).is_dir()\n assert expected_path == dir_path\n\n expected_path = 'weather_data/523920_2012_12'\n dir_path, file_paths = get_city_data(523920, 2012, 12, path='weather_data')\n assert len(file_paths) == 0\n assert pathlib.Path(dir_path).is_dir()\n assert expected_path == dir_path\n", "sub_path": "lab_10/tasks/task_2.py", "file_name": "task_2.py", "file_ext": "py", "file_size_in_byte": 2644, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "typing.Optional", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 11, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pathlib.Path.cwd", "line_number": 27, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pathlib.Path.cwd", "line_number": 30, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pathlib.Path.mkdir", "line_number": 33, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 60, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "pathlib.Path.cwd", "line_number": 72, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 82, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "185955259", "text": "def three_color_render(inputfile,sheetname,columnname,firstcolor,secondcolor,thirdcolor,outputfile):\n from openpyxl import load_workbook\n from openpyxl.styles import colors\n from openpyxl.formatting.rule import ColorScaleRule\n workbook = load_workbook(inputfile)\n sheet = workbook[sheetname]\n color_scale_rule = ColorScaleRule(start_type=\"percentile\",\n start_value=0,\n start_color=colors.COLOR_INDEX[firstcolor],\n mid_type=\"percentile\",\n mid_value=50,\n mid_color=colors.COLOR_INDEX[secondcolor],\n 
end_type=\"percentile\",\n end_value=100,\n end_color=colors.COLOR_INDEX[thirdcolor]\n )\n bcell = columnname+str(1)\n ecell = columnname+str(sheet.max_row)\n sheet.conditional_formatting.add(str(bcell)+\":\"+str(ecell),color_scale_rule)\n workbook.save(outputfile)\n\nif __name__ == \"__main__\":\n inputfile = \"D:\\\\1AAA\\python开发\\\\Bio_T2Ex\\CSV\\\\CSV.template.xlsx\"\n sheetname = \"Sheet\"\n columnname = \"G\"\n firstcolor = 30\n secondcolor = 1\n thirdcolor = 29\n outputfile = \"D:\\\\1AAA\\python开发\\\\Bio_T2Ex\\CSV\\\\CSV.template111.xlsx\"\n three_color_render(inputfile,sheetname,columnname,firstcolor,secondcolor,thirdcolor,outputfile)\n\n \n\n", "sub_path": "PACKAGE/build/lib/Bio_T2Ex/package_3_render.py", "file_name": "package_3_render.py", "file_ext": "py", "file_size_in_byte": 1469, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 5, "usage_type": "call"}, {"api_name": "openpyxl.formatting.rule.ColorScaleRule", "line_number": 7, "usage_type": "call"}, {"api_name": "openpyxl.styles.colors.COLOR_INDEX", "line_number": 9, "usage_type": "attribute"}, {"api_name": "openpyxl.styles.colors", "line_number": 9, "usage_type": "name"}, {"api_name": "openpyxl.styles.colors.COLOR_INDEX", "line_number": 12, "usage_type": "attribute"}, {"api_name": "openpyxl.styles.colors", "line_number": 12, "usage_type": "name"}, {"api_name": "openpyxl.styles.colors.COLOR_INDEX", "line_number": 15, "usage_type": "attribute"}, {"api_name": "openpyxl.styles.colors", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "383883945", "text": "import collections\nimport json\nimport pickle\nfrom pathlib import Path\n\nimport nltk\nfrom nltk import word_tokenize\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ndef process_query(query):\n # Paths to load the trained model\n base_path = Path(__file__).parent.parent\n corpus_dict_path = base_path / \"data/raw_data/summaries/summary_file.txt\"\n vectorizer_path = base_path / \"data/trained_model/corpus_vectorizer.pkl\"\n corpus_tfidf_path = base_path / \"data/trained_model/corpus_tfidf.pkl\"\n\n corpus_vectorizer = pickle.load(open(vectorizer_path, \"rb\"))\n corpus_tfidf = pickle.load(open(corpus_tfidf_path, \"rb\"))\n corpus_dict = collections.OrderedDict()\n\n with open(corpus_dict_path, 'r') as read_file:\n corpus_dict = json.load(read_file)\n\n tfidf_matrix_test = corpus_vectorizer.transform([query])\n cosine_similarity_matrix = cosine_similarity(corpus_tfidf, tfidf_matrix_test)\n return corpus_dict, cosine_similarity_matrix\n\n\n# Map the original corpus to its cosine score\ndef get_recommendations(corpus_dict, cosine_similarity_matrix):\n items = list(corpus_dict.items())\n recommendation_dict = collections.OrderedDict()\n\n for i in range(0, len(items)):\n corpus_text = items[i]\n title = corpus_text[0]\n cosine_score = cosine_similarity_matrix[i]\n recommendation_dict[title] = cosine_score\n\n sorted_recommendation_dict = {k: v for k, v in\n sorted(recommendation_dict.items(), reverse=True, key=lambda item: item[1])}\n return sorted_recommendation_dict\n\n\n# Print the recommendations\ndef print_recommendations(corpus_dict, sorted_recommendation_dict, presult=False):\n print(\"Based on your search query, look at these datasets from CSA :\")\n\n # We limit the search results to 10\n limit = 10\n count = 0\n result = collections.OrderedDict()\n for title, cosine_similarity in sorted_recommendation_dict.items():\n if cosine_similarity 
== 0.0 or count == limit:\n break\n result[title] = corpus_dict[title]\n count += 1\n\n if presult:\n for k, v in result.items():\n print(k)\n print(v)\n print(\"-----------------------------------------------------\")\n return result\n\n# Function to tokenize the text blob\ndef tokenize(text):\n tokens = word_tokenize(text)\n lemma = find_lemma(tokens)\n return lemma\n\n\n# Lemmatize words for better matching\ndef find_lemma(tokens):\n wordnet_lemmatizer = nltk.WordNetLemmatizer()\n result = []\n for word in tokens:\n lemma_word = wordnet_lemmatizer.lemmatize(word)\n result.append(lemma_word)\n return result\n\ndef get_user_query(query_dict):\n # Read the input document that needs to be compared\n return query_dict[\"query\"]\n\n\ndef recommend(query):\n #Build model\n #build_TFDIF_model.build_model()\n\n qdict = {\"query\": query}\n user_query = get_user_query(qdict)\n corpus_dict, cosine_similarity_matrix = process_query(user_query)\n recommendation_dict = get_recommendations(corpus_dict, cosine_similarity_matrix)\n return print_recommendations(corpus_dict, recommendation_dict)\n\ndef main():\n query_dict = {\"query\": \"I want to know more about WINDII and Doppler and performance\"}\n\n user_query = get_user_query(query_dict)\n corpus_dict, cosine_similarity_matrix = process_query(user_query)\n recommendation_dict = get_recommendations(corpus_dict, cosine_similarity_matrix)\n print_recommendations(corpus_dict, recommendation_dict, True)\n\n# Call main method\nif __name__ == \"__main__\":\n main()\n", "sub_path": "vhere/code/Recommender.py", "file_name": "Recommender.py", "file_ext": "py", "file_size_in_byte": 3580, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "pathlib.Path", "line_number": 12, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 19, "usage_type": "call"}, {"api_name": "json.load", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 25, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 32, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 53, "usage_type": "name"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 54, "usage_type": "name"}, {"api_name": "nltk.word_tokenize", "line_number": 68, "usage_type": "call"}, {"api_name": "nltk.WordNetLemmatizer", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "367939775", "text": "from functools import cmp_to_key\n\ndef cmp(a,b):\n if sum([a[1],a[2]]) > sum([b[1],b[2]]):\n return 1\n elif sum([a[1],a[2]]) < sum([b[1],b[2]]):\n return -1\n else:\n if a[1]>b[1]:\n return 1\n elif a[1]b[0]:\n return -1\n else:\n return 1\n\n\nn, low, high = [int(i) for i in input().split(' ')]\nsaver = {'sage': [], 'nobleman': [], 'fool_men': [], 'other': []}\nfor _ in range(n):\n id_, virtue, talent = list(map(int, input().split()))\n if virtue >= low and talent >= low:\n if virtue >= high and talent >= high:\n saver['sage'].append([id_, virtue, talent])\n elif talent < high and virtue >= high:\n saver['nobleman'].append([id_, virtue, talent])\n elif virtue < high and talent < high and virtue >= talent:\n saver['fool_men'].append([id_, virtue, talent])\n 
else:\n saver['other'].append([id_, virtue, talent])\nret_data = []\nfor title in ['sage', 'nobleman', 'fool_men', 'other']:\n ret_data += sorted(saver[title], key=cmp_to_key(cmp),reverse=True)\nprint(len(ret_data))\nfor data in ret_data:\n print('{} {} {}'.format(data[0],data[1],data[2]))\n", "sub_path": "pat1062/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1263, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "functools.cmp_to_key", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "614617484", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport seaborn as sns\nfrom scipy import stats #to call a function that removes anomalies\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\nprint((check_output([\"ls\", \"../input\"]).decode(\"utf8\")))\n\n# Any results you write to the current directory are saved as output.\n\n\n# Hello,\n# \n# So I analysed certain factors to see if they had any relationships with house prices and the factors that had the most relationships were number of bathrooms, grade and sqft_living. \n# \n# The coefficient result was quite interesting and unexpected, you should definitely check it out.\n# \n# I'm still new at this and soo all feedback is greatly appreciated.\n# \n# Cheers!\n# \n# Fayomi\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\ndf = pd.read_csv('../input/kc_house_data.csv')\n\n\n# In[ ]:\n\n\ndf.head()\n\n\n# In[ ]:\n\n\ndf.drop(['id','date','sqft_lot','sqft_above','lat', 'long','zipcode', 'sqft_living15', 'sqft_lot15','waterfront','view'],axis=1,inplace=True)\n\n\n# In[ ]:\n\n\ndf = df[(np.abs(stats.zscore(df)) < 3).all(axis=1)] #to remove anomalies\ndf.head()\n\n\n# In[ ]:\n\n\ndf.info()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\nplt.figure(figsize=(16,6))\nsns.distplot(df['price'],kde=False,bins=50)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\nplt.figure(figsize=(16,6))\nsns.distplot(df['price'].dropna(),kde=False,bins=50)\n\n\n# In[ ]:\n\n\nplt.figure(figsize=(16,6))\nsns.countplot(df['bedrooms'])\n\n\n# In[ ]:\n\n\nplt.figure(figsize=(16,6))\nsns.countplot(df['bathrooms'])\n\n\n# In[ ]:\n\n\nplt.figure(figsize=(16,6))\nsns.distplot(df['sqft_living'].dropna(),kde=False,bins=50)\n\n\n# In[ ]:\n\n\nsns.pairplot(df)\n\n\n# In[ ]:\n\n\nsns.jointplot(x='bedrooms',y='price',data=df)\n\n\n# In[ ]:\n\n\nsns.jointplot(x='price',y='sqft_living',data=df,kind='reg')\n\n\n# In[ ]:\n\n\nsns.jointplot(x='floors',y='price',data=df)\n\n\n# In[ ]:\n\n\nsns.jointplot(x='grade',y='price',data=df, kind='reg')\n\n\n# In[ ]:\n\n\nsns.jointplot(x='yr_built',y='price',data=df)\n\n\n# In[ ]:\n\n\nsns.jointplot(x='sqft_basement',y='price',data=df)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\nsns.jointplot(x='bathrooms',y='price',data=df, kind='reg')\n\n\n# In[ ]:\n\n\nsns.jointplot(x='condition',y='price',data=df)\n\n\n# the conditions most correlated with price are: bathrooms,, grade, 
sqft_living (and maybe bedrooms)\n\n# In[ ]:\n\n\nsns.heatmap(df.corr(),cmap='coolwarm', annot=True)\n\n\n# \n# TIME TO FORMAT DATA FOR ML\n\n# In[ ]:\n\n\n\ndf.columns\n\n\n# In[ ]:\n\n\n#selected inputs\nx = df[['bathrooms','grade','sqft_living']]\n#expected output\ny = df['price']\n\n\n# In[ ]:\n\n\nfrom sklearn.cross_validation import train_test_split\n\n\n# In[ ]:\n\n\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=101)\n\n\n# In[ ]:\n\n\nfrom sklearn.linear_model import LinearRegression\n\n\n# In[ ]:\n\n\nlm = LinearRegression()\n\n\n# In[ ]:\n\n\n#to train the data\nlm.fit(x_train,y_train)\n\n\n# In[ ]:\n\n\n#to calculate teh coefficients\nlm.coef_\n\n\n# In[ ]:\n\n\n#to create a table with the coefs\ncdf = pd.DataFrame(lm.coef_,x.columns,columns=['coefs'])\n\n\n# In[ ]:\n\n\ncdf\n\n\n# In[ ]:\n\n\n#to get the predictions of test set\npred = lm.predict(x_test)\n\n\n# In[ ]:\n\n\n#to plot predictions and actual result\n#This shows an accurate preditction\nplt.scatter(y_test, pred)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n", "sub_path": "downloaded_kernels/house_sales/parsed_kernels/kernel_18.py", "file_name": "kernel_18.py", "file_ext": "py", "file_size_in_byte": 3733, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "subprocess.check_output", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 66, "usage_type": "call"}, {"api_name": "scipy.stats.zscore", "line_number": 66, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 120, "usage_type": "call"}, {"api_name": "seaborn.pairplot", "line_number": 126, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 132, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 138, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 144, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 150, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 156, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 162, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 174, "usage_type": "call"}, {"api_name": 
"seaborn.jointplot", "line_number": 180, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 188, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 219, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 231, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 273, "usage_type": "name"}]} +{"seq_id": "209780643", "text": "import py.path\n\nfrom pdfminer.pdfparser import PDFParser, PDFDocument\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.layout import LAParams, LTTextBox\nfrom pdfminer.converter import PDFPageAggregator\n\ndef eq_(a, b, msg=None):\n __tracebackhide__ = True\n assert a == b, msg or \"%r != %r\" % (a, b)\n\nclass TestData:\n def __init__(self, datadirpath):\n self.datadirpath = py.path.local(datadirpath)\n \n def filepath(self, relative_path, *args):\n \"\"\"Returns the path of a file in testdata.\n \n 'relative_path' can be anything that can be added to a Path\n if args is not empty, it will be joined to relative_path\n \"\"\"\n resultpath = self.datadirpath.join(relative_path)\n if args:\n resultpath = resultpath.join(*args)\n assert resultpath.check()\n return str(resultpath)\n\ndef pages_from_pdf(path, **laparams):\n fp = open(path, 'rb')\n doc = PDFDocument(caching=True)\n parser = PDFParser(fp)\n parser.set_document(doc)\n doc.set_parser(parser)\n doc.initialize()\n rsrcmgr = PDFResourceManager()\n laparams = LAParams(all_texts=True, **laparams)\n device = PDFPageAggregator(rsrcmgr, laparams=laparams)\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n result = []\n for page in doc.get_pages():\n interpreter.process_page(page)\n page_layout = device.get_result()\n result.append(page_layout)\n return result\n\ndef extract_from_elem(elem, lookfor):\n if isinstance(elem, lookfor):\n return [elem]\n else:\n try:\n return sum((extract_from_elem(subelem, lookfor) for subelem in elem), [])\n except TypeError:\n return []\n\ndef extract_textboxes(elem):\n return extract_from_elem(elem, lookfor=LTTextBox)", "sub_path": "tests/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 1813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "py.path.path.local", "line_number": 14, "usage_type": "call"}, {"api_name": "py.path.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "py.path", "line_number": 14, "usage_type": "name"}, {"api_name": "pdfminer.pdfparser.PDFDocument", "line_number": 30, "usage_type": "call"}, {"api_name": "pdfminer.pdfparser.PDFParser", "line_number": 31, "usage_type": "call"}, {"api_name": "pdfminer.pdfinterp.PDFResourceManager", "line_number": 35, "usage_type": "call"}, {"api_name": "pdfminer.layout.LAParams", "line_number": 36, "usage_type": "call"}, {"api_name": "pdfminer.converter.PDFPageAggregator", "line_number": 37, "usage_type": "call"}, {"api_name": "pdfminer.pdfinterp.PDFPageInterpreter", "line_number": 38, "usage_type": "call"}, {"api_name": "pdfminer.layout.LTTextBox", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "480579885", "text": "\"\"\"\nGroupMe Statistics Project\n\nUSAGE\n work.py [mode]\n\n \n Filename of the GroupMe transcript to analyze. 
(REQUIRED)\n\n [mode]\n Valid options are:\n\n uids - Print the unique, GroupMe-assigned IDs of every user\n who has participated in the chat, along with the first\n screenname they used. This often (but not always)\n corresponds to their real name.\n\n markov - Instead of performing the standard analyses,\n generate Markov text based on the parameters described\n at the start of the script.\n\n <none> - Run the basic analysis on the chat, including,\n but not limited to:\n - Total messages sent\n - Total likes received\n - Likes received per message sent\n - Gender analysis (if applicable)\n - Etc.\n\n\nFIRST-TIME USE INSTRUCTIONS\n Before running analysis on a file (here, \"transcript.json\"), several CSVs\n should be set up to provide more information about the chat participants.\n\n First, run the command:\n\n work.py transcript.json uids > uids.csv\n\n This will generate a CSV with two fields: user IDs, and names. COPY\n this file until you have three copies, then RENAME them to the filenames\n given below:\n\n vip.csv - The users who will be included in the analysis. If you do not\n want infrequent contributors polluting the results, REMOVE\n their row from the spreadsheet.\n\n In addition, the remaining VIPs can be given any name/nickname\n in the second column, and they will be referred to by this name\n in the generated results. This can be used, for example, to\n replace the silly screen-names of friends (\"Captain Obvious\",\n \"Cat Lover\") with actual names (\"Jim\", \"Rachel\"),\n thus avoiding confusion and ambiguity when reviewing the results.\n\n women.csv - The female users. This file is not necessary, but the script \n will skip all gender-related analysis if this file does not\n exist.\n\n To use, REMOVE the rows representing male participants\n from this file, so that only female participants remain.\n The second column has no bearing on the results, but can be\n retained for convenience should you revisit this file later.\n\n pets.csv - Keywords representing the names of users' pets, including\n the type of pet, the pet name, and any nicknames. Terms\n are plus-delimited in the second column. A sample line\n may look like:\n\n 4215225,scruffy+the dog+my dog+scruffs\n\n These terms will be used to count how often a user mentions\n his/her pet.\n\n Place all these CSV files in the same path as work.py, and they will be\n automatically picked up when the script is run.\n\n Now, run the full command. Remember to pipe the output to a file: without\n any direction, the script will print all results to STDOUT:\n\n work.py transcript.json > data.csv\n\n Alternatively, to generate Markov output, run:\n\n work.py transcript.json markov > mark.csv\n\n Output file names are only suggestions. Markov output is much better\n with a larger dataset. Small transcripts will mostly yield sentences\n that are carbon-copies of actual text from the chat.\n\n Have fun!\n\"\"\"\nimport sys\nimport io # For Unicode validation\nfrom random import choice\nimport os.path # For isfile\n\nimport json\nfrom datetime import datetime\nimport re # For capturing boops\n\n\n# -- Markov parameters --\n\n# Number of words to remember in markov history\n# e.g.
for length 2 and sentence \"I like listening to music\":\n#\n# \"I like\" -> listening\n# \"like listening\" -> to\n# \"listening to\" -> music\n# etc\nmarkov_chain_length = 2\n\n# Minimum message length to consider\nmarkov_msg_min_words = markov_chain_length * 2\n\n# Create separate markov chain for each user, versus a single chain\n# This MUST be set to false for high values of markov_msg_min_words,\n# since otherwise some users will not have any text associated with their UID\nmarkov_user_chains = False\n\n# Level to restrict characters\n# 0 == no restrictions at all\nmarkov_allow_level = 2\n\n# Number of messages to generate\nmarkov_output_msg_amt = 500\n\n# Print a debug table of markov values and exit\nmarkov_debug_table = False\n\n# Convert all characters to lowercase\nmarkov_all_lower = False\n\n# Only allow MC output from this user - overrides markov_user_chains\nmarkov_fix_uid = \"\"\n\n# Print verbose markov output to markov_verbose_file\nmarkov_verbose_flag = False\nmarkov_verbose_file = sys.stderr\n\n# -- end Markov parameters --\n\n# -- Constants --\n\n# Random text that shouldn't ever appear in the groupme\nMARKOV_START = \"fpaj8fP)#8FJPO*\"\nMARKOV_END = \"pOp98)J#P*)(J#FP*WF\"\n\n# Dino character (stored here as U+FFFD, the Unicode replacement character)\nDINO_CHAR = '�'\n\n# -- end Constants --\n\n# -- General parameters --\n\n# Decimal places to round for output\nDEC_PLACES = 2\n\n# Choose whether to include formal words when scoring\n# gender rating of each user.\ninclude_formal_words = False\n\n# Choose whether to report most-used words\n# This adds a few noticeable seconds of runtime\ncapture_top_words = False\n\n# Number of top words to report\namt_top_words = 200\n\n# Only print report titles\nonly_print_titles = False\n\n# Get popular messages with at least [x] likes\npopular_min = 8\n\n# -- end Parameters --\n\n# If true, print UIDs and exit.\n# Can also be triggered by providing \"uids\" in the command line\n# after the required argument(s).\nshow_uid_map = False\n\ndef main():\n \"\"\"work.py <file> [mode]\"\"\"\n\n global show_uid_map\n markov_enabled = False\n\n if len(sys.argv) < 2:\n print(main.__doc__)\n sys.exit(1)\n\n if len(sys.argv) >= 3:\n mode = sys.argv[2]\n\n if mode == \"uids\":\n show_uid_map = True\n elif mode == \"markov\":\n markov_enabled = True\n else:\n print(\"-E- Mode\", mode, \"not recognized.\", file=sys.stderr)\n sys.exit(1)\n\n with open(sys.argv[1], encoding='utf-8') as fh:\n transcript = json.load(fh)\n fh.close()\n\n if show_uid_map:\n print_all_uids(transcript)\n return\n\n names = get_uids_from_csv(\"vip.csv\")\n pets = get_uids_from_csv(\"pets.csv\")\n\n # -- Markov chain!
--\n if markov_enabled:\n markov_data = gen_markov_chain(transcript, names)\n gen_markov_output(markov_data, names)\n return\n\n # -- Report overall totals --\n get_report_totals(transcript)\n\n # -- Get/report likes --\n\n (likes_given, likes_received) = calc_likes(transcript)\n\n report_uid_data(likes_given, names, \"Likes given\")\n report_uid_data(likes_received, names, \"Likes received\")\n\n # This function prints its own data\n date_analysis(names, transcript)\n\n # -- Get/report basic user stats --\n\n (msgs_sent, chars_sent) = basic_user_stats(transcript)\n\n report_uid_data(msgs_sent, names, \"Messages sent\")\n report_uid_data(chars_sent, names, \"Characters sent\")\n\n # -- Boooooooop --\n\n boops_per_user = regex_count(transcript, 'bo+p')\n report_uid_data(boops_per_user, names, \"Boops sent\")\n\n # -- Get some interesting man-v-woman data --\n\n (fem_ids, amt_each_sex) = get_fem_ids(\"women.csv\", msgs_sent.keys())\n\n if fem_ids:\n report_by_gender(split_by_gender(msgs_sent, fem_ids), amt_each_sex, \"Messages sent\")\n report_by_gender(split_by_gender(chars_sent, fem_ids), amt_each_sex, \"Characters sent\")\n report_by_gender(split_by_gender(likes_given, fem_ids), amt_each_sex, \"Likes given\")\n report_by_gender(split_by_gender(likes_received, fem_ids), amt_each_sex, \"Likes received\")\n\n # liked_gender = get_gender_liked(transcript, fem_ids)\n # report_top_words(liked_gender[0], 50, \"Most-liked words by women\")\n\n # -- Get message quality as a function of likes received per message --\n\n likes_rec_per_msg = msg_quality(msgs_sent, likes_received)\n report_uid_data(likes_rec_per_msg, names, \"Likes per message\")\n\n # -- Second round: v. specific data --\n\n bro_per_user = regex_count(transcript, '\\\\bgym\\\\b|\\\\bbro\\\\b|\\\\bgains\\\\b|\\\\bswole\\\\b')\n report_uid_data(bro_per_user, names, \"Most bro\")\n\n dino_per_user = regex_count(transcript, DINO_CHAR)\n report_uid_data(dino_per_user, names, \"Most dino\")\n\n dino_starts_per_user, avg_dino_length, dino_count = get_dino_starters(transcript)\n report_uid_data(dino_starts_per_user, names, \"Dino starters\")\n print(\"Avg dino chain length,\" + str(avg_dino_length) + \"\\n\")\n print(\"Dino count,\" + str(dino_count) + \"\\n\")\n\n ha_per_user = regex_count(transcript, '\\\\b(ha)+\\\\b')\n report_uid_data(ha_per_user, names, \"Giggliest (number of ha's)\")\n\n # -- Scaling by days active --\n\n days_active = get_days_active(transcript)\n report_uid_data(days_active, names, \"Days active\")\n\n report_over_time(days_active, names, [\n [msgs_sent, 'Messages sent'],\n [likes_given, 'Likes given'],\n [likes_received, 'Likes received']\n ])\n\n like_bd = get_like_breakdown(transcript)\n self_love = get_self_love(like_bd)\n\n report_uid_data(self_love, names, \"Self love\")\n\n # -- User x User data reporting, KEEP AT BOTTOM --\n\n report_uxu(like_bd, names, \"Likes per person\", \"liked by\")\n\n responses = get_response_amt(transcript)\n report_uxu(responses, names, \"Responses by person\", \"followed by\")\n\n like_matches = get_like_matches(transcript, likes_given)\n report_uxu(like_matches, names, \"Like matches\", \"shared x% of likes with\")\n\n # -- Random stuff --\n\n avg_haha_len = get_avg_haha(transcript)\n report_uid_data(avg_haha_len, names, \"Avg haha length\")\n\n gender_scores = get_gender_scores(transcript)\n report_uid_data(gender_scores, names, \"Gender score\")\n\n get_best_msgs(transcript, names)\n get_liked_freq(transcript)\n\n get_like_details(transcript, names)\n\n avg_word_len = 
get_avg_word_len(transcript)\n report_uid_data(avg_word_len, names, \"Avg word length\")\n\n if pets:\n pet_scores = get_pet_scores(transcript, pets)\n report_uid_data(pet_scores, names, \"Pet scores\")\n\n # -- Word count --\n\n if capture_top_words:\n word_count = get_word_count(transcript)\n report_top_words(word_count, amt_top_words, \"Most-used words\")\n\ndef get_report_totals(transcript):\n \"\"\"\n Tabulate transcript totals without regard for\n user-specific data, e.g. total messages.\n \"\"\"\n\n msgs = 0\n msgs_liked = 0\n chars = 0\n likes = 0\n sys_likes = 0\n\n for msg in transcript:\n\n msg_likes = len(msg['favorited_by'])\n\n if from_sys(msg):\n sys_likes += msg_likes\n continue\n else:\n likes += msg_likes\n\n msgs += 1\n\n if msg_likes:\n msgs_liked += 1\n\n if msg['text']:\n chars += len(msg['text'])\n\n print(\"Total\")\n print(\"Messages\", msgs, sep=',')\n print(\"Messages liked\", msgs_liked, sep=',')\n print(\"Characters\", chars, sep=',')\n print()\n print(\"User-to-user likes\", likes, sep=',')\n print(\"User-to-system likes\", sys_likes, sep=',')\n print(\"Total likes\", likes+sys_likes, sep=',')\n print()\n\ndef report_top_words(word_freq, amt, title):\n \"\"\"Report top words, excluding non-printable words\"\"\"\n\n sorted_wf = sorted(word_freq, key=word_freq.get, reverse=True)\n\n i = 0\n good_words = []\n\n while len(good_words) < amt:\n word = sorted_wf[i]\n i += 1\n\n good_words.append(word)\n\n # There is going to be a messed-up line in CSV output...\n # Don't let it pollute actual data\n print()\n\n print(title)\n\n if only_print_titles:\n return\n\n for i in range(0, amt):\n word = good_words[i]\n print(i+1, to_ascii(word), word_freq.get(word, 0), sep=',')\n\ndef get_gender_scores(transcript):\n \"\"\"Calculate the \"gender score\" of each user\n\n Scoring originally described by Argamon, et al (2003).\n Scoring values introduced by the Gender Genie, and\n adopted by the Gender Guesser.\"\"\"\n\n result = {}\n\n male_score = {}\n female_score = {}\n uids = set()\n\n word_values = define_word_gender_values()\n\n for msg in transcript:\n uid = msg['user_id']\n line = msg['text']\n\n if from_sys(msg) or line is None or 'http' in line:\n continue\n\n words = line.lower().split()\n\n # Iterate over each word, updating scoring if word has a weight\n for word in words:\n if word in word_values:\n uids.add(uid)\n value = word_values[word]\n\n if value > 0:\n male_score[uid] = male_score.setdefault(uid, 0) + value\n else:\n female_score[uid] = female_score.setdefault(uid, 0) - value\n\n for uid in uids:\n ms = male_score.get(uid, 0)\n fs = female_score.get(uid, 0)\n\n # Cover case when we would be dividing by zero\n if ms + fs == 0:\n result[uid] = 0\n continue\n\n # Grab percentage male\n # To view raw score, calculate (ms - fs)\n result[uid] = ms * 100 / (ms + fs)\n\n return result\n\ndef define_word_gender_values():\n\n words = {}\n\n # Informal scores\n\n words['actually']= -49;\n words['am']= -42;\n words['as']= 37;\n words['because']= -55;\n words['but']= -43;\n words['ever']= 21;\n words['everything']= -44;\n words['good']= 31;\n words['has']= -33;\n words['him']= -73;\n words['if']= 25;\n words['in']= 10;\n words['is']= 19;\n words['like']= -43;\n words['more']= -41;\n words['now']= 33;\n words['out']= -39;\n words['since']= -25;\n words['so']= -64;\n words['some']= 58;\n words['something']= 26;\n words['the']= 17;\n words['this']= 44;\n words['too']= -38;\n words['well']= 15;\n\n if not include_formal_words:\n return words\n\n # Formal scores\n\n 
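# NOTE: both weight tables share this one dict, so for keys that appear in\n # both lists ('as', 'if', 'is', 'more', 'the') the formal weight below\n # silently overwrites the informal weight above.\n 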
words['a']= 6;\n words['above']= 4;\n words['and']= -4;\n words['are']= 28;\n words['around']= 42;\n words['as']= 23;\n words['at']= 6;\n words['be']= -17;\n words['below']= 8;\n words['her']= -9;\n words['hers']= -3;\n words['if']= -47;\n words['is']= 8;\n words['it']= 6;\n words['many']= 6;\n words['me']= -4;\n words['more']= 34;\n words['myself']= -4;\n words['not']= -27;\n words['said']= 5;\n words['she']= -6;\n words['should']= -7;\n words['the']= 7;\n words['these']= 8;\n words['to']= 2;\n words['was']= -1;\n words['we']= -8;\n words['what']= 35;\n words['when']= -17;\n words['where']= -18;\n words['who']= 19;\n words['with']= -52;\n words['your']= -17;\n\n return words\n\ndef get_response_amt(transcript):\n \"\"\"\n How much each user responds to other users\n \n Data is 'followed by', so\n result[Alice][Bob] == number of times Bob responded to Alice\n \"\"\"\n\n result = {}\n\n last_uid = \"\"\n\n for msg in transcript:\n uid = msg['user_id']\n\n if from_sys(msg):\n continue\n\n if last_uid != \"\":\n if last_uid not in result:\n result[last_uid] = {}\n\n if last_uid != uid:\n result[last_uid][uid] = result[last_uid].setdefault(uid, 0) + 1\n\n last_uid = uid\n\n return result\n\ndef get_avg_word_len(transcript):\n \"\"\"Get average word length per user\"\"\"\n\n chars = {}\n words = {}\n avg = {}\n\n for msg in transcript:\n uid = msg['user_id']\n text = msg['text']\n\n if text is None:\n continue\n\n msg_words = text.split()\n\n words[uid] = words.setdefault(uid, 0) + len(msg_words)\n for mw in msg_words:\n if 'http' in mw:\n continue\n chars[uid] = chars.setdefault(uid, 0) + len(mw)\n\n for uid in words:\n avg[uid] = chars[uid] / words[uid]\n\n return avg\n\ndef get_dino_starters(transcript):\n \"\"\"Get number of times each user has started a dino chain\n of at least 2\n \n Return:\n (UID table of times each user started a chain,\n avg chain length, total dino count)\n \"\"\"\n\n result = {}\n chain_length = 0\n first_uid = 0\n chain_lengths = []\n dino_count = 0\n\n for msg in transcript:\n uid = msg['user_id']\n text = msg['text']\n\n if text is None:\n continue\n\n # If a dino exists in the current message\n dino_lives = DINO_CHAR in text\n # Rolling count of dinos in the active chain\n dino_count += text.count(DINO_CHAR)\n\n # If a chain was started, but now it has ended,\n # capture the full length of the chain\n if not dino_lives and chain_length > 1:\n chain_lengths.append(chain_length)\n\n chain_length = chain_length + 1 if dino_lives else 0\n\n # Capture chain starter\n if chain_length == 1:\n saved_uid = uid\n\n # It's a chain! 
Save starter ID\n if chain_length == 2:\n result[saved_uid] = result.setdefault(saved_uid, 0) + 1\n\n return (result, sum(chain_lengths) / max(len(chain_lengths), 1), dino_count)\n\ndef get_gender_liked(transcript, fem_ids):\n \"\"\"Get words most liked by each gender\"\"\"\n\n fem = 0\n men = 1\n\n liked = [{}, {}]\n\n for msg in transcript:\n owner = msg['user_id']\n line = msg['text']\n\n if from_sys(msg) or line is None or 'http' in line:\n continue\n\n words = line.lower().split()\n liked_by = msg['favorited_by']\n\n for wd in words:\n for uid in liked_by:\n word = remove_punctuation(wd)\n\n if uid in fem_ids:\n index = fem\n else:\n index = men\n\n liked[index][word] = liked[index].setdefault(word, 0) + 1\n\n return liked\n\ndef report_uxu(data, names, title, postfix):\n \"\"\"\n Print a table with x and y axes representing users.\n Useful for certain datasets, such as who responds to whom.\n \"\"\"\n\n print(title + \"\\n\")\n\n if only_print_titles:\n return\n\n print(postfix, end='')\n\n uids = list(names.keys())\n\n # -- Print y-axis names --\n\n topline = \"\"\n\n for uid in uids:\n topline += ',' + names[uid]\n\n print(topline)\n\n # x-axis, row-by-row\n for row in uids:\n line = names[row]\n\n # Iterate across columns\n for col in uids:\n if row in data and col in data[row]:\n line += ',' + str(round(data[row][col], DEC_PLACES))\n else:\n line += ',0'\n\n # Print this row\n print(line)\n\n print()\n\ndef get_self_love(like_bd):\n \"\"\"\n How many times has someone hearted him/herself?\n \n Omit 0-count results.\n \"\"\"\n\n result = {}\n\n for uid in like_bd:\n if uid in like_bd[uid]:\n result[uid] = like_bd[uid][uid]\n\n return result\n\ndef get_word_count(transcript):\n \"\"\"Return dictionary with word count.\"\"\"\n\n result = {}\n\n for msg in transcript:\n line = msg['text']\n\n if from_sys(msg) or line is None:\n continue\n\n words = line.lower().split()\n\n # Add word-by-word\n for word in words:\n if 'http' in word:\n continue\n\n word = remove_punctuation(word)\n\n if not word:\n continue\n\n result[word] = result.setdefault(word, 0) + 1\n\n return result\n\ndef remove_punctuation(word):\n \"\"\"Remove select punctuation from a word\"\"\"\n\n punct = '.,:;\"/!@#$%^&*()+_-=[]{}<>?'\n\n result = word\n\n for char in punct:\n result = result.replace(char, '')\n\n return result\n\ndef get_avg_haha(transcript):\n \"\"\"Hahaha\"\"\"\n\n result = {}\n pattern = re.compile('\\\\b(ha)+\\\\b', re.IGNORECASE)\n\n for msg in transcript:\n line = msg['text']\n uid = msg['user_id']\n\n if from_sys(msg) or not line or 'http' in line:\n continue\n\n pat_result = pattern.search(line)\n\n if pat_result:\n pat_result = pat_result.group(0)\n result[uid] = result.setdefault(uid, []) + [len(pat_result)/2]\n\n for uid in result:\n avg = sum(result[uid]) / len(result[uid])\n result[uid] = avg\n\n return result\n\ndef validate_word_unicode(word):\n \"\"\"\n Certain special characters give the Windows console trouble.\n Catch these characters as gracefully as possible, flagging them\n without crashing the whole script.\n \"\"\"\n\n for char in word:\n try:\n print(char + '\\b', end='')\n except UnicodeEncodeError:\n return False\n\n return True\n\ndef get_like_breakdown(transcript):\n \"\"\"Get table of who has received likes from whom\"\"\"\n\n result = {}\n\n for msg in transcript:\n uid = msg['user_id']\n likes = msg['favorited_by']\n\n if from_sys(msg):\n continue\n\n for giver in likes:\n if uid not in result:\n result[uid] = {}\n result[uid][giver] = result[uid].setdefault(giver, 0) + 1\n\n 
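# e.g. result['111']['222'] == 3 means user 222 has liked three of\n # user 111's messages (UIDs here are illustrative).\n 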
return result\n\ndef report_over_time(days_active, names, info):\n \"\"\"\n Normalize a user's data point by the days active in the chat\n \"\"\"\n\n for table in info:\n\n scaled_data = {}\n\n orig_data = table[0]\n title = table[1]\n\n for uid in orig_data:\n if uid in names and uid in days_active:\n scaled_data[uid] = float(\"{0:.2f}\".format(orig_data[uid] / days_active[uid]))\n\n report_uid_data(scaled_data, names, title + \" per day\")\n\ndef get_pet_scores(transcript, pets_source):\n \"\"\"\n Get number of times user has mentioned his/her\n pets. +1 per msg, so multiple mentions per message\n are ignored.\n \"\"\"\n\n result = {}\n\n pet_names = {}\n\n for uid in pets_source:\n pet_names[uid] = pets_source[uid].split(\"+\")\n\n for msg in transcript:\n uid = msg['user_id']\n text = msg['text']\n\n if text is None or uid not in pet_names:\n continue\n\n for name in pet_names[uid]:\n found = name in text.lower()\n result[uid] = result.setdefault(uid, 0) + (1 if found else 0)\n\n return result\n\ndef msg_quality(msgs_sent, likes_received):\n \"\"\"Calculate message quality: likes received per message.\"\"\"\n\n result = {}\n\n for uid in likes_received:\n result[uid] = likes_received[uid] / msgs_sent[uid]\n\n return result\n\ndef report_by_gender(data, amt, title):\n \"\"\"Utility function to report gender-specific data\"\"\"\n\n (val_fem, val_men) = data\n (amt_fem, amt_men) = amt\n\n print(title + \" by gender\")\n\n if only_print_titles:\n return\n\n print(\"Women\", val_fem, 'Per woman', val_fem/amt_fem, sep=',')\n print(\"Men\", val_men, 'Per man', val_men/amt_men, sep=',')\n print()\n\ndef get_fem_ids(filename, active_users):\n \"\"\"Get IDs of all women in this chat from women.csv\"\"\"\n\n if not os.path.isfile(filename):\n print('-W- File \"' + filename + '\" not found - skipping gender analysis', file=sys.stderr)\n return None, (0, 0)\n\n fem_ids = []\n\n fh_lines = []\n with open(filename) as fh:\n fh_lines = fh.readlines()\n\n for line in fh_lines:\n line = line.strip()\n\n if line:\n words = line.split(',')\n fem_ids.append(words[0])\n\n amt_fem = len(fem_ids)\n amt_men = len(active_users) - amt_fem\n\n print(\"Number of women\", amt_fem, sep=',')\n print(\"Number of men\", amt_men, sep=',')\n print()\n\n return fem_ids, (amt_fem, amt_men)\n\ndef get_days_active(transcript):\n \"\"\"Return days that the user has been active.\n\n Difference, in days, between the first message posted by each user,\n and the final message of the transcript.\"\"\"\n\n result = {}\n start_date = {}\n\n for msg in transcript:\n if from_sys(msg):\n continue\n\n uid = msg['user_id']\n date = datetime.fromtimestamp(msg['created_at'])\n\n last_date = date\n\n if uid not in start_date:\n start_date[uid] = date\n\n for uid in start_date:\n result[uid] = (last_date - start_date[uid]).days\n\n if not result[uid]:\n result[uid] = 1\n\n return result\n\ndef split_by_gender(data, fem_ids):\n \"\"\"Returns (amt_fem, amt_men)\"\"\"\n\n amt_fem = 0\n amt_men = 0\n\n for uid in data:\n val = data[uid]\n\n if uid in fem_ids:\n amt_fem += val\n else:\n amt_men += val\n\n return (amt_fem, amt_men)\n\ndef report_uid_data(data, names, title):\n \"\"\"Report any data stored as (UID, datum) KV pair\"\"\"\n print(title)\n\n if only_print_titles:\n return\n\n sorted_uids = sorted(data, key=data.get, reverse=True)\n\n for uid in sorted_uids:\n # If someone hasn't posted a single message, they won't be in names\n if uid in names and uid in data:\n print(names[uid], round(data[uid], DEC_PLACES), sep=',')\n\n print()\n\ndef 
from_sys(msg):\n \"\"\"\n Return true if this message was created by an automated\n GroupMe system account. Several exist per chat.\n \"\"\"\n\n if msg['system'] or msg['user_id'] == 'calendar':\n return True\n\n return False\n\ndef date_analysis(names, transcript):\n \"\"\"Do some date analysis.\"\"\"\n\n # List of date objects\n dates = []\n\n for msg in transcript:\n if from_sys(msg):\n continue\n # print(datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'))\n dates.append(datetime.fromtimestamp(msg['created_at']))\n\n weekday_freq(dates)\n hour_freq(dates)\n\ndef weekday_freq(dates):\n \"\"\"Measure total message frequency per weekday\"\"\"\n \n activity = {}\n day_names = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n\n for date in dates:\n wd = date.weekday()\n activity[wd] = activity.setdefault(wd, 0) + 1\n\n print(\"Messages per weekday\")\n\n for i in range(7):\n print(day_names[i], activity.get(i, 0), sep=',')\n\n print()\n\n return activity\n\ndef regex_count(transcript, regex):\n \"\"\"Count regex matches per user across the transcript.\"\"\"\n\n result = {}\n pattern = re.compile(regex, re.IGNORECASE)\n\n for msg in transcript:\n line = msg['text']\n uid = msg['user_id']\n\n if line is None:\n continue\n\n pat_result = pattern.findall(line)\n\n if pat_result:\n result[uid] = result.setdefault(uid, 0) + len(pat_result)\n\n return result\n\n\ndef hour_freq(dates):\n \"\"\"Messages per hour across all days\"\"\"\n\n activity = {}\n\n for date in dates:\n hr = date.hour\n\n activity[hr] = activity.setdefault(hr, 0) + 1\n\n print(\"Messages per hour\")\n\n for i in range(24):\n if i == 0:\n hr_s = \"12 am\"\n elif 1 <= i <= 11:\n hr_s = str(i) + \" am\"\n elif i == 12:\n hr_s = \"12 pm\"\n else:\n hr_s = str(i - 12) + \" pm\"\n\n print(hr_s, activity.get(i, 0), sep=',')\n\n print()\n\n return activity\n\ndef basic_user_stats(transcript):\n \"\"\"Simple stuff: messages sent, characters sent.\"\"\"\n\n msgs_sent = {}\n chars_sent = {}\n\n for msg in transcript:\n if from_sys(msg):\n continue\n\n line = msg['text']\n uid = msg['user_id']\n\n msgs_sent[uid] = msgs_sent.setdefault(uid, 0) + 1\n\n # Text can be null\n if line is None:\n continue\n\n chars_sent[uid] = chars_sent.setdefault(uid, 0) + len(line)\n\n\n return (msgs_sent, chars_sent)\n\ndef calc_likes(transcript):\n \"\"\"Calculate the likes given and received for each user\n \n Returns a pair of dicts: (likes_given, likes_received)\n \n Each KV pair in the dicts has the format (uid -> likes)\n uid = string\n likes = int\"\"\"\n\n likes_received = {}\n likes_given = {}\n\n # Some low-participation users may like a few comments\n # and never leave messages of their own. 
Without this,\n # there is no way to associate a UID with a name.\n #\n # Assume these users have not made many contributions,\n # ignore them\n #\n # Shouldn't make too much of a difference, since a separate routine\n # captures totals irrespective of UID.\n active_users = set()\n\n for msg in transcript:\n if from_sys(msg):\n continue\n\n uid = msg['user_id']\n likes = msg['favorited_by']\n\n active_users.add(uid)\n\n # First capture likes received\n m_rec = len(msg['favorited_by'])\n likes_received[uid] = likes_received.setdefault(uid, 0) + m_rec\n\n # Add likes given\n for giver in likes:\n likes_given[giver] = likes_given.setdefault(giver, 0) + 1\n\n # Filter out low-participation users\n likes_given = {k:likes_given[k] for k in likes_given if k in active_users}\n\n return (likes_given, likes_received)\n\ndef get_like_details(transcript, names):\n \"\"\"Print table of users' message popularity\"\"\"\n\n result = {}\n max_likes = 0\n\n for msg in transcript:\n uid = msg['user_id']\n likes = len(msg['favorited_by'])\n\n max_likes = max(max_likes, likes)\n\n if uid not in names:\n continue\n\n if uid not in result:\n result[uid] = {}\n\n result[uid][likes] = result[uid].setdefault(likes, 0) + 1\n\n print(\"Messages with x likes\")\n\n # Print header for section\n for i in range(0, max_likes+1):\n print(\",\" + str(i), end = \"\")\n print()\n\n for uid in result:\n print(names[uid], end='')\n\n for i in range(0, max_likes+1):\n print(',', result[uid].get(i, 0), sep='', end='')\n\n print()\n\n print()\n\ndef get_best_msgs(transcript, names):\n \"\"\"Returns most popular messages given a threshold\"\"\"\n\n result = []\n\n for msg in transcript:\n uid = msg['user_id']\n text = msg['text']\n pic = msg['picture_url']\n score = len(msg['favorited_by'])\n\n text = text if text else \"no text\"\n pic = pic if pic else \"no pic\"\n\n text = text.replace('\\n', '\\\\')\n\n if uid not in names or score < popular_min:\n continue\n\n result.append([names[uid], score, pic, to_ascii(text)])\n\n print(\"Messages with\", popular_min, \"or more likes\")\n\n for r in result:\n print(r[0], r[1], r[2], r[3], sep=',')\n\n print()\n\ndef get_liked_freq(transcript):\n \"\"\"Returns how often messages get one heart, etc\"\"\"\n\n result = {}\n\n for msg in transcript:\n score = len(msg['favorited_by'])\n\n result[score] = result.setdefault(score, 0) + 1\n\n print(\"Messages with x hearts\")\n\n for s in sorted(result):\n print (str(s) + ',' + str(result[s]))\n\n print()\n\ndef get_like_matches(transcript, likes_given):\n \"\"\"\n Count the number of times each pair of users\n has liked the same message.\n \"\"\"\n\n result = {}\n\n uids = likes_given.keys()\n\n # Initialize data structure\n # For now, stores redundant values\n for uid in uids:\n result[uid] = {}\n for u2 in uids:\n result[uid][u2] = 0\n\n for msg in transcript:\n likes = [x for x in msg['favorited_by'] if x in uids]\n\n for u1 in likes:\n for u2 in likes:\n if u1 != u2:\n result[u1][u2] += 1\n\n # Reframe as percentage of user2's likes\n for focus in result:\n for other in result[focus]:\n val = result[focus][other]\n result[focus][other] = val / likes_given[other] * 100\n\n return result\n\ndef get_uids_from_csv(filename):\n \"\"\"\n Read a CSV where the first column is UIDs, and the second\n is a value. 
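A row might look like '12345678,Alice' (values here are illustrative). 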
Return it as a dictionary.\n \"\"\"\n\n result = {}\n\n if not os.path.isfile(filename):\n print('-W- File \"' + filename + '\" not found - skipping', file=sys.stderr)\n return {}\n\n with open(filename) as fh:\n fh_lines = fh.readlines()\n\n for line in fh_lines:\n line = line.strip()\n\n if line:\n cols = line.split(',')\n result[cols[0]] = cols[1]\n\n return result\n\ndef print_all_uids(transcript):\n \"\"\"Print all UIDs and names in CSV format\n as they appear in the transcript.\n \n <uid>,<name>\"\"\"\n\n # List of UIDs already printed\n uids = []\n\n for msg in transcript:\n uid = msg['user_id']\n name = msg['name']\n\n if uid not in uids:\n uids.append(uid)\n print(uid, to_ascii(name), sep=',')\n\ndef gen_markov_chain(transcript, names):\n \"\"\"Generate a Markov chain based on Groupme data\n Return an MC of message data, and an MC of who responds to whom\"\"\"\n\n markov_print(\"START gen_markov_chain\")\n\n valid_chars = \"abcdefghijklmnopqrstuvwxyz\"\n valid_chars += valid_chars.upper()\n valid_chars += \"0123456789\"\n\n valid_char_list = [\n \"',-\",\n \".?!@$%&*()\\\\/:\",\n '\"'\n ]\n\n for i in range(0, markov_allow_level):\n valid_chars += valid_char_list[i]\n\n markov_print(\"Valid characters = \" + valid_chars)\n \n # 1. Generate the probability table\n # This is done naively, by adding each word\n # as it is encountered.\n # Probabilities are not stored directly.\n\n # markov chain of UID's and words\n # key uid -> key word -> list words\n #\n # UID -> word1 -> word\n # word\n # word\n # word2 -> word\n # word\n mc = {}\n mc['all'] = {}\n\n # markov chain of who follows whom\n uid_mc = {}\n last_uid = None\n\n for msg in transcript:\n uid = msg['user_id']\n line = msg['text']\n\n if markov_fix_uid and uid != markov_fix_uid:\n continue\n\n if uid not in names or line is None:\n continue\n\n if uid not in mc:\n mc[uid] = {}\n\n if markov_all_lower:\n line = line.lower()\n\n if last_uid and uid != last_uid:\n uid_mc[uid] = uid_mc.setdefault(uid, []) + [last_uid]\n\n curr_phrase = MARKOV_START\n\n key = uid if markov_user_chains or markov_fix_uid else 'all'\n words_orig = line.split()\n words = []\n\n # First, sanitize words in-place\n for word in words_orig:\n if markov_allow_level > 0:\n i = 0\n while i < len(word):\n if word[i] not in valid_chars:\n word = word[:i] + word[i+1:]\n continue\n i += 1\n\n if word == \"\":\n continue\n\n words.append(word)\n\n # Is this message above the minimum length?\n if len(words) < markov_msg_min_words:\n continue\n\n gen_end = len(words) - markov_chain_length\n\n for i in range(0, len(words)):\n curr_phrase = ' '.join(words[i:i+markov_chain_length])\n\n # If starting, START -> First multi-word phrase\n # After this, all keys are multi-word, and all values are single-word\n if i == 0:\n mc[key][MARKOV_START] = mc[key].setdefault(MARKOV_START, []) + [curr_phrase]\n\n # At end of chain, add end flag\n if i >= gen_end:\n addon = MARKOV_END\n else:\n addon = words[i+markov_chain_length]\n\n mc[key][curr_phrase] = mc[key].setdefault(curr_phrase, []) + [addon]\n\n last_uid = uid\n\n # Print Markov chain and exit\n if markov_debug_table:\n\n total = 0\n count = 0\n\n for uid in mc:\n print(uid + \":\")\n for key in mc[uid]:\n print(to_ascii(\" \" + key))\n count += 1\n for item in mc[uid][key]:\n print(to_ascii(\" \" + to_ascii(item)))\n total += 1\n\n print(\"MEAN val amt(\" + str(markov_chain_length) + \") = \",\n total/count)\n sys.exit(0)\n\n return (mc, uid_mc)\n\ndef gen_markov_output(markov_data, names):\n \"\"\"Take in Markov chains of message data and 
user-to-user frequency\n Print randomly generated messages\"\"\"\n\n markov_print(\"START gen_markov_output\")\n\n (mc, uid_mc) = markov_data\n\n # -- Generate output --\n\n # Pick random user to start\n uids = list(names.keys())\n uid = choice(uids)\n\n markov_print(\"Generating output...\")\n\n for i in range(0, markov_output_msg_amt):\n if markov_fix_uid:\n uid = markov_fix_uid\n\n\n word = MARKOV_START\n line = []\n while True:\n key = uid if markov_user_chains or markov_fix_uid else 'all'\n word_list = mc[key][word]\n\n line += choice(word_list).split()\n word = ' '.join(line[-markov_chain_length:])\n\n if MARKOV_END in line:\n if line[-1] != MARKOV_END:\n print(\"-E- Line: \" + to_ascii(' '.join(line)) + \" is corrupt!\")\n sys.exit(1)\n else:\n # Shouldn't be added in the first place...\n line.remove(MARKOV_END)\n break\n \n print(names[uid] + \":\", to_ascii(' '.join(line)))\n\n last_uid = uid\n\n if not markov_fix_uid:\n uid = choice(uid_mc[uid])\n\ndef markov_print(msg):\n if markov_verbose_flag:\n print(msg, file=markov_verbose_file)\n\ndef to_ascii(msg):\n \"\"\"Convert a Unicode string to ASCII for printing\"\"\"\n return str(msg.encode('utf-8').decode('ascii', 'ignore'))\n\nif __name__ == '__main__':\n main()\n sys.exit(0)\n", "sub_path": "work.py", "file_name": "work.py", "file_ext": "py", "file_size_in_byte": 37363, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "sys.stderr", "line_number": 138, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 188, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 190, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 192, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 193, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 200, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 201, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 203, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 204, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 750, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 750, "usage_type": "attribute"}, {"api_name": "os.path.path.isfile", "line_number": 878, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 878, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 878, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 879, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 918, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 918, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 986, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 986, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 1014, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 1014, "usage_type": "attribute"}, {"api_name": "os.path.path.isfile", "line_number": 1251, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 1251, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 1251, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 1252, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 1422, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 1437, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 1443, "usage_type": "call"}, 
{"api_name": "random.choice", "line_number": 1454, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 1466, "usage_type": "call"}]} +{"seq_id": "512525247", "text": "# Go through all with class classname 'item-link-container'\n\n# https://www.hemnet.se/salda/bostader?location_ids%5B%5D=17880&page=2&sold_age=all\n# change page number above here\n\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport time\nimport pandas as pd\n\nclass HemnetScraper:\n\n '''\n Need to add date sold, what type of house and address\n '''\n\n\n def __init__(self):\n self.current_page = 1\n\n\n def scrape_all_prices(self):\n\n all_houses = []\n\n url = 'https://www.hemnet.se/salda/bostader?location_ids%5B%5D=17880&page={}&sold_age=all'.format(self.current_page)\n response = requests.get(url)\n\n\n # Look for something that shows that there are no more pages\n while self.current_page < 11:\n soup = self.get_soup(url)\n target_tag = soup.find_all('a', class_='item-link-container', href=True)\n # These give specific links to each house which have more information about each house\n # Therefore, for every tag we need to make a new request and find the information about\n # the specific house at that page. \n # NOTE:\n # Coordinates for the house can be found in the following tag: \n # div id=\"map\" class=\"sold-property__map js-listing-map-sold\" data-initial-data=\"{coordinate":[57.78513229627912,12.629472520465715]\n for tag in target_tag:\n specific_soup = self.get_soup(tag['href'])\n target = specific_soup.find('div', id='map', class_='sold-property__map')\n # Find the two numbers inside target - they reside inside square brackets\n # and ONLY that square bracket exists.\n lat, lon = self.get_lat_lon(target)\n addr = self.get_address(specific_soup)\n\n # Get the details of the house sale\n # House price looks like the following: \n #
 <span class=\"sold-property__price-value\">\n # Slutpris\n # 1 750 000 kr\n # </span>
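\n # NOTE: house_price below stays raw text (something like '1 750 000 kr'),\n # so the spaces and the 'kr' suffix would need stripping before any\n # numeric use.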
\n house_div = specific_soup.find('span', class_='sold-property__price-value')\n house_price = house_div.text\n\n attributes = specific_soup.find_all('dt', class_='sold-property__attribute')\n values = specific_soup.find_all('dd', class_='sold-property__attribute-value')\n\n tmp_house = {}\n\n for attr, value in zip(attributes, values):\n cleaned_attr = attr.text \\\n .strip() \\\n .replace('\\xa0', '')\n cleaned_value = value.text \\\n .strip() \\\n .replace('\\xa0', '')\n tmp_house[cleaned_attr] = cleaned_value\n\n\n price_info = specific_soup.find('p', class_='sold-property__metadata')\n uncleaned_info = price_info.text.strip()\n uncleaned_info = uncleaned_info.replace('\\n', '')\n tmp_house['address-sold-type'] = uncleaned_info\n # TODO:\n # Find the address, date sold and type of house inside uncleaned info\n\n tmp_house['lat'] = lat\n tmp_house['long'] = lon\n tmp_house['address'] = addr\n\n all_houses.append(tmp_house)\n \n url = 'https://www.hemnet.se/salda/bostader?location_ids%5B%5D=17880&page={}&sold_age=all'.format(self.current_page)\n self.current_page += 1\n\n\n all_column_names = []\n for house in all_houses:\n for key, val in house.items():\n all_column_names.append(key)\n\n columns = set(all_column_names)\n\n all_houses_data_list = []\n for house in all_houses:\n tmp_lst = []\n for key in columns:\n try:\n tmp_lst.append(house[key])\n except KeyError:\n tmp_lst.append('n/a')\n all_houses_data_list.append(tmp_lst)\n\n df = pd.DataFrame(all_houses_data_list, columns=columns)\n\n df.to_csv('houses.csv')\n\n \n\n\n def get_lat_lon(self, target):\n lat = None\n lon = None\n try: \n bracket = str(target).strip().split('[')\n end_bracket = bracket[1].split(']')\n lat = end_bracket[0].split(',')[0]\n lon = end_bracket[0].split(',')[1]\n return float(lat), float(lon)\n except IndexError as e:\n print(e)\n return 'n/a', 'n/a'\n\n\n def get_address(self, target):\n addr = target.find('h1', class_='sold-property__address').text\n addr = addr.lower().split('slutpris')[1].strip()\n return addr\n\n\n def get_soup(self, url):\n response = requests.get(url)\n html = response.content\n soup = BeautifulSoup(html, features='html.parser')\n print(url)\n time.sleep(1)\n return soup", "sub_path": "python_scripts/hemnet_scraper.py", "file_name": "hemnet_scraper.py", "file_ext": "py", "file_size_in_byte": 5147, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "requests.get", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 136, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 138, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "299227896", "text": "# Jupyter notebook for regression models (Gradient Boosting, Random Forest Regression)\n# CSC691 Final Project\n# PaceMakers: Predicting Average Heart Rate for Bike Rides\n# Patrick, Esteban, Sarah\n# 12/2/19\n\n\nimport pandas as pd\nfrom dateutil.parser import parse\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import RFE\nfrom sklearn.linear_model import LogisticRegression\nimport statsmodels\nimport statsmodels.api as sm\nfrom sklearn.linear_model import LogisticRegression, LinearRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nfrom sklearn.metrics import 
classification_report\nfrom sklearn.metrics import roc_auc_score\nimport numpy as np\nimport csv\nimport warnings\nimport math\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import ensemble\nfrom sklearn.metrics import mean_squared_error, accuracy_score\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nnp.set_printoptions(suppress=True)\npd.set_option('display.max_columns', None) # or 1000\npd.set_option('display.max_rows', None) # or 1000\n\n\ndef prep_data(df):\n df = df[['Avg HR (bpm)','Date','Type','Distance (km)','Avg Pace (/km)','Calories','HRSS','Elevation Gain (m)']]\n df = df.rename(columns={\"Avg HR (bpm)\": \"AvgHR\"})\n df = df[df.AvgHR != '-']\n types = ['Ride', 'VirtualRide']\n df = df[df.Type.isin(types)]\n df = df.reset_index(drop=True)\n\n # Convert features to numbers\n df[\"AvgHR\"] = pd.to_numeric(df[\"AvgHR\"])\n df[\"Calories\"] = pd.to_numeric(df[\"Calories\"])\n df[\"HRSS\"] = pd.to_numeric(df[\"HRSS\"])\n\n # Convert Avg Pace to seconds, parse Date and Time as separate columns\n for i in range(df.shape[0]):\n (m, s) = str(df.loc[i,'Avg Pace (/km)']).split(':')\n df.loc[i,'Avg Pace (/km)']= (int(m) * 60) + int(s)\n dt = parse(df.loc[i,'Date'])\n df.loc[i,'Date'] = dt.date()\n df.loc[i,'Time'] = dt.time()\n # Convert Avg Pace to number\n df['Avg Pace (/km)'] = pd.to_numeric(df['Avg Pace (/km)'])\n\n # Create binary labels for High 'AvgHR' (1) and Low 'AvgHR' (0) based on threshold of 154 bpm\n for j in range(df.shape[0]):\n if int(df.loc[j,'AvgHR']) > 154:\n #print(df.loc[j,'AvgHR'])\n df.loc[j,'AvgHR_bin'] = 1\n else:\n df.loc[j,'AvgHR_bin'] = 0\n\n return df\n\ndef train_test(df, window_size):\n # Calculate sample size of each class\n count_no_sub = len(df[df['AvgHR_bin']==1])\n count_sub = len(df[df['AvgHR_bin']==0])\n pct_of_no_sub = count_no_sub/(count_no_sub+count_sub)\n print(\"percentage of High AvgHR is\", '%.2f' %(pct_of_no_sub*100))\n pct_of_sub = count_sub/(count_no_sub+count_sub)\n print(\"percentage of Low AvgHR is\", '%.2f' %(pct_of_sub*100))\n\n # Define train and test set, perform RFE\n # window_size = 294\n # removed Calories as feature since p-value was 0.07 > 0.05 (from below Logit function), as recommended\n df_vars = ['Distance (km)', 'Avg Pace (/km)', 'HRSS', 'Elevation Gain (m)','AvgHR']\n df_bin = ['AvgHR_bin']\n\n df_final = df[df_vars]\n df_final_bin = df[df_bin]\n\n df_final_vars=df_final.columns.values.tolist()\n df_final_bin_vars=df_final_bin.columns.values.tolist()\n\n y=df_final.AvgHR\n X=[i for i in df_final_vars if i not in y]\n X_bin = [i for i in df_final_bin_vars if i not in y]\n\n X = df_final.loc[:, df_final.columns != 'AvgHR']\n y = df_final.loc[:, df_final.columns == 'AvgHR']\n X_bin = df_final_bin.loc[:, df_final_bin.columns == 'AvgHR_bin']\n\n # Configure train and test sets\n X_train = X.iloc[window_size:]\n y_train = y.iloc[window_size:]\n\n X_test = X.iloc[:window_size]\n y_test = y.iloc[:window_size]\n # print(\"X test: \", X_test, \"y test: \", y_test)\n\n # Perform RFE (recursive feature elimination) to determine ranking of features\n logreg = LogisticRegression()\n rfe = RFE(logreg, 20)\n rfe = rfe.fit(X_train, y_train.values.ravel())\n print(\"RFE support: \", rfe.support_)\n print(\"RFE ranking: \", rfe.ranking_)\n\n # Implement Logit model to determine p-values and coefficients for each feature\n # logit_model = sm.Logit(y, X)\n # result = logit_model.fit()\n # print(\"Logit results: \", result.summary2())\n\n return X, X_bin, y\n\ndef model(window_size, X, y, clf):\n print('# 
Predictions: ', window_size)\n print('Window Size: ', 309 - window_size)\n\n X_train = X.iloc[window_size:]\n y_train = y.iloc[window_size:]\n X_test = X.iloc[:window_size]\n y_test = y.iloc[:window_size]\n X_bin_test = X_bin.iloc[:window_size]\n\n\n # train set\n print('train set size: ', y_train.shape[0])\n # print(y_train)\n # test set\n print('test set size: ', y_test.shape[0])\n # print(y_test)\n\n actuals = pd.DataFrame(y_test)\n actuals = actuals.rename(columns={'AvgHR': 'Actuals'})\n preds = np.zeros(X_test.shape[0])\n preds_bin = np.zeros(X_test.shape[0])\n actuals_bin = np.zeros(X_test.shape[0])\n # print('preds (zeros): ', preds)\n\n for i in range(0, y_test.shape[0]):\n #print('Iteration: ', i)\n #print('X train shape: ', X_train.shape[0])\n #print('X test shape: ', y_test.shape[0])\n clf.fit(X_train, y_train.values.ravel())\n # Predict test set\n y_pred = clf.predict(np.array(X_test.iloc[-1]).reshape(1, -1))\n #print('actual: ', y_test.loc[0, 'AvgHR_bin'], '\\n pred: ', y_pred, '\\n')\n preds[i] = y_pred\n if y_pred > 154:\n preds_bin[i] = 1\n else:\n preds_bin[i] = 0\n actuals_bin[i] = df_bin.loc[X_bin_test.iloc[-1],'AvgHR_bin']\n mse = mean_squared_error(y_test.iloc[-1], y_pred)\n\n # print('Accuracy of logistic regression classifier on test set {}: {:.2f}'.format(i, logreg.score(X_test, y_test)))\n\n # X_train = pd.concat([X_test.iloc[0], X_train]).reset_index(drop = True)\n # print('new X train: ', X_train.head())\n\n X_test_inst = pd.DataFrame(data=[X_test.iloc[-1]],\n columns=[\"Distance (km)\", \"Avg Pace (/km)\", \"HRSS\", \"Elevation Gain (m)\"])\n y_test_inst = pd.DataFrame(data=[y_test.iloc[-1]], columns=[\"AvgHR\"])\n # print(\"X test inst: \", X_test_inst)\n X_train = X_train.drop(X_train.index[-1]).reset_index(drop=True)\n X_train = pd.concat([X_test_inst, X_train]).reset_index(drop=True)\n # print(\"X train: \\n\", X_train)\n\n y_train = y_train.drop(y_train.index[-1])\n y_train = pd.concat([y_test_inst, y_train])\n y_train = y_train.reset_index(drop=True)\n # print(\"y train \\n\", y_train)\n\n X_test = X_test.drop(X_test.index[-1])\n X_test = X_test.reset_index(drop=True)\n # print(\"X test: \\n\", X_test)\n\n y_test = y_test.drop(y_test.index[-1])\n y_test = y_test.reset_index(drop=True)\n\n preds_act_df = pd.DataFrame(preds, columns=['Predictions'])\n preds_act_df = preds_act_df.join(actuals.iloc[::-1].reset_index(drop=True))\n # print('actuals and preds: \\n', preds_act_df)\n # print('actuals bin: ', actuals_bin)\n # print('preds bin: ', preds_bin)\n # print('predictions: ', preds)\n accuracy = metrics.accuracy_score(actuals_bin, preds_bin)\n print('accuracy for window size {}: {}'.format(309 - window_size, '%.3f' % (accuracy)))\n final_mse = metrics.mean_squared_error(preds_act_df.Actuals.ravel(), preds_act_df.Predictions.ravel())\n print('mse for window size {}: {}'.format(window_size, '%.3f' % (final_mse)))\n\n return accuracy, actuals_bin, preds_bin, final_mse\n\ndef find_optimal_window(clf, max_window):\n accuracies = np.zeros(max_window-1)\n mses = np.zeros(max_window-1)\n\n for window_size in range(1,max_window):\n accuracy, actuals, preds, mse = model(window_size, X, y, clf)\n accuracies[window_size-1] = accuracy\n mses[window_size-1] = mse\n\n print('Accuracies: ', accuracies)\n print('MSEs: ', mses)\n\n # Output accuracies for each window size to Accuracies_for_Window_Size_Variations.csv file\n with open('MSEs_for_Window_Size_Variations_' + clf_name + '.csv', 'w') as f:\n for i in range(0, len(mses)):\n f.write(str(308 - i) + ': ' + 
str(mses[i]))\n f.write('\\n')\n\ndef read_windows():\n # Read in accuracies for each window size from Accuracies_for_Window_Size_Variations.csv file and sort by accuracy to determine best window size\n mylist = pd.DataFrame(columns=['Window', 'MSE'])\n with open('MSEs_for_Window_Size_Variations_' + clf_name + '.csv', 'r') as csvfile:\n for i, row in enumerate(csv.reader(csvfile, delimiter='\\n')):\n mylist.loc[i, 'Window'] = row[0].split(':')[0]\n mylist.loc[i, 'MSE'] = float(row[0].split(':')[1])\n print(mylist.sort_values('MSE', 0, ascending=True))\n\ndef read_look_aheads():\n mylist = pd.DataFrame(columns=[])\n with open('Accuracies_for_Look_Ahead_Variations_' + clf_name + '.csv', 'r') as csvfile:\n for i, row in enumerate(csv.reader(csvfile, delimiter='\\n')):\n mylist.loc[i, 'Look Ahead'] = row[0].split(':')[0]\n mylist.loc[i, 'Accuracy'] = row[0].split(':')[1]\n print(mylist.sort_values('Accuracy', 0, ascending=False))\n print('\\n')\n mylist2 = pd.DataFrame(columns=[])\n with open('MSEs_for_Look_Ahead_Variations_' + clf_name + '.csv', 'r') as csvfile:\n for i, row in enumerate(csv.reader(csvfile, delimiter='\\n')):\n mylist2.loc[i, 'Look Ahead'] = row[0].split(':')[0]\n mylist2.loc[i, 'MSE'] = float(row[0].split(':')[1])\n print(mylist2.sort_values('MSE', 0, ascending=True))\n\ndef results(actuals, preds):\n confusion_matrix1 = confusion_matrix(actuals, preds)\n print(confusion_matrix1)\n\n tn, fp, fn, tp = confusion_matrix(actuals, preds).ravel()\n print('true neg: ', tn, '\\nfalse pos: ', fp, '\\nfalse neg: ', fn, '\\ntrue pos: ',tp)\n\n # Precision, recall, f1-score, support (# of test instances per class)\n print(classification_report(actuals, preds))\n\ndef find_optimal_lookahead(window_size, X, y, clf):\n accuracies = np.zeros(window_size)\n mses = np.zeros(window_size)\n for m in range(1, window_size + 1):\n accuracy, predictions, actuals, mse = model_lookahead(window_size, X, y, m, clf)\n accuracies[m-1] = accuracy\n mses[m-1] = mse\n\n avg_mse_df = pd.DataFrame(data=mses, columns=['MSE'])\n print('MSEs: \\n', avg_mse_df)\n with open('MSEs_for_Look_Ahead_Variations_' + clf_name + '.csv', 'w') as f:\n # f.write(\"Look Ahead : Accuracy \\n\")\n for i in range(0, len(mses)):\n f.write(str(i + 1) + ': ' + str(mses[i]))\n f.write('\\n')\n acc_df = pd.DataFrame(data=accuracies, columns=['Accuracy'])\n print('Average Accuracies: \\n', accuracies)\n with open('Accuracies_for_Look_Ahead_Variations_' + clf_name + '.csv', 'w') as f:\n # f.write(\"Look Ahead : Accuracy \\n\")\n for i in range(0, len(accuracies)):\n f.write(str(i + 1) + ': ' + str(accuracies[i]))\n f.write('\\n')\n\ndef model_lookahead(window_size, X, y, look_ahead, clf):\n print('# Predictions: ', window_size)\n print('Window Size: ', 309 - window_size)\n print('Look Ahead Value: ', look_ahead)\n print('--------------------------------------')\n\n # train set\n X_train = X.iloc[window_size:]\n y_train = y.iloc[window_size:]\n print('train size: ', y_train.shape[0])\n # test set\n X_test = X.iloc[:window_size]\n y_test = y.iloc[:window_size]\n print('test size: ', y_test.shape[0])\n X_bin_test = X_bin.iloc[:window_size]\n\n actuals = []\n predictions = []\n actuals_bin = []\n preds_bin = []\n\n accuracies = np.zeros(math.ceil(y_test.shape[0] / look_ahead))\n computations = np.zeros(math.ceil(y_test.shape[0] / look_ahead))\n mses = np.zeros(math.ceil(y_test.shape[0] / look_ahead))\n\n for i in range(0, math.ceil(y_test.shape[0] / look_ahead)):\n print('# iterations: ', math.ceil(y_test.shape[0] / look_ahead))\n preds = 
np.zeros(min(look_ahead, y_test.shape[0]))\n # rf_probs = np.zeros(min(look_ahead, y_test.shape[0]))\n actuals_look_ahead = np.zeros(min(look_ahead, y_test.shape[0]))\n computations[i] = min(look_ahead, y_test.shape[0])\n print('Iteration: ', i)\n print('----------------')\n print('X train shape: ', X_train.shape[0])\n print('X test shape: ', y_test.shape[0])\n clf.fit(X_train, y_train.values.ravel())\n\n # Predict test set\n for j in range(1, min(look_ahead + 1, y_test.shape[0] + 1)):\n y_pred = clf.predict(np.array(X_test.iloc[-j]).reshape(1, -1))\n # print('actual: ', y_test.iloc[-j], '\\n pred: ', y_pred, '\\n')\n preds[j - 1] = y_pred\n actuals_look_ahead[j - 1] = y_test.iloc[-j]\n\n if y_pred > 154:\n preds_bin.append(1)\n else:\n preds_bin.append(0)\n predictions.append(preds[j-1])\n actuals_bin.append(df_bin.loc[X_bin_test.iloc[-1], 'AvgHR_bin'].values[0])\n # rf_probs[j - 1] = clf.predict_proba(np.array(X_test.iloc[-j]).reshape(1, -1))[:, 1]\n\n\n X_test_inst = pd.DataFrame(columns=[\"Distance (km)\", \"Avg Pace (/km)\", \"HRSS\", \"Elevation Gain (m)\"])\n y_test_inst = pd.DataFrame(columns=[\"AvgHR\"])\n for k in range(1, min(look_ahead + 1, y_test.shape[0] + 1)):\n X_test_inst = X_test_inst.append(X_test.iloc[-k])\n y_test_inst = y_test_inst.append(y_test.iloc[-k])\n #print(\"X test inst: \", X_test_inst)\n\n X_train = X_train.drop(X_train.index[-1]).reset_index(drop=True)\n X_train = pd.concat([X_test_inst, X_train]).reset_index(drop=True)\n # print(\"X train: \\n\", X_train)\n\n y_train = y_train.drop(y_train.index[-1])\n y_train = pd.concat([y_test_inst, y_train]).reset_index(drop=True)\n # print(\"y train \\n\", y_train)\n\n X_test = X_test.drop(X_test_inst.index.values)\n X_test = X_test.reset_index(drop=True)\n # print(\"X test: \\n\", X_test)\n\n y_test = y_test.drop(y_test_inst.index.values)\n y_test = y_test.reset_index(drop=True)\n\n print(\"predictions: \", preds)\n print(\"actuals: \", actuals_look_ahead)\n for x in range(len(actuals_look_ahead)):\n actuals.append(actuals_look_ahead[x])\n mse = metrics.mean_squared_error(preds, actuals_look_ahead)\n\n # roc_value = roc_auc_score(actuals_look_ahead, rf_probs)\n\n print('mse for iteration {} at window size={} with look ahead value={}: {}'.format(i, 309 - window_size, look_ahead, '%.3f' % (mse)))\n mses[i] = mse\n\n print(\"\\npredict bins: \", preds_bin)\n print(\"actual bins: \", actuals_bin)\n print('predictions: ', predictions)\n print('actuals: ', actuals)\n accuracy = metrics.accuracy_score(preds_bin, actuals_bin)\n print('overall accuracy for window size={}, look ahead value={}: {}'.format(309-window_size, look_ahead, '%.3f' % (accuracy)))\n print('mses: ', mses)\n\n #avg_accuracy = 0\n avg_mse = 0\n for g in range(len(accuracies)):\n # avg_accuracy += (computations[g] / window_size) * accuracies[g]\n avg_mse += (computations[g] / window_size) * mses[g]\n #print('\\noverall weighted accuracy for window size={}, look ahead={}: {}'.format(window_size, look_ahead, '%.3f' % (avg_accuracy)))\n print('overall weighted mse for window size={}, look ahead={}: {}\\n'.format(309-window_size, look_ahead, '%.3f' % (avg_mse)))\n\n return accuracy, predictions, actuals, avg_mse\n\ndef save_results(window_size, predictions, actuals):\n # Output predictions and actuals for final window size to csv file\n with open(clf_name + '.csv', 'w') as f:\n f.write(\"Prediction, Actual \\n\")\n for i in range(0,window_size):\n f.write(str(predictions[i]) + ', ' + str(actuals[i]))\n f.write('\\n')\n\n\n\n\n# Please update the path to the 
activities.csv file from the repository\ndf = pd.read_csv('activities.csv')\n# Prepare data\ndf_bin = prep_data(df)\n\n# max_window = df_bin.shape[0]\n\n# Define X, binary X, and y for test and train set\nX, X_bin, y = train_test(df_bin, 128)\n\nclf_name = input('Please specify the regression model you wish to test (LinR, GB, or RFR) for Linear Regression, '\n 'Gradient Boosting Regression, or Random Forest Regression: \\n')\n\nif clf_name == 'GB':\n print('Analysis for Gradient Boosting\\n -------------------------------------')\n optimal_window = 7\n params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,\n 'learning_rate': 0.01, 'loss': 'ls'}\n clf = ensemble.GradientBoostingRegressor(**params)\n look_ahead = 7\n\n # Find optimal window size for GB\n all_iterations = input(\n \"Would you like to iterate over all possible window sizes to find the optimal window size (Y/N)? If so, please note this may take an extensive amount of time. \\n\")\n if all_iterations == 'Y':\n find_optimal_window(clf,277)\n read_windows()\n\n # Run GB for the chosen optimal window size\n GBinput = input(\n 'Please indicate Y/N to proceed with running GB with the optimal window size specified: {} \\n'.format(\n 309 - optimal_window))\n if GBinput == 'Y':\n print('Running GB with the optimal window size {}...'.format(optimal_window))\n accuracy, actuals, preds, mse = model(optimal_window, X, y, clf)\n results(actuals, preds)\n\n # Find optimal look ahead value for GB\n all_iterations_lookahead = input(\n \"Would you like to iterate over all possible look ahead values to find the optimal look ahead value (Y/N)? If so, please note this may take an extensive amount of time. \\n\")\n if all_iterations_lookahead == 'Y':\n find_optimal_lookahead(optimal_window,X,y,clf)\n read_look_aheads()\n\n # Run GB for the chosen optimal window size and look-ahead value\n LR_lookinput = input(\n 'Please indicate Y/N to proceed with running GB with the optimal window size and look ahead value: {}, {} \\n'.format(\n 309 - optimal_window, look_ahead))\n if LR_lookinput == 'Y':\n avg_accuracy, predictions, actuals, mse = model_lookahead(optimal_window, X, y, look_ahead, clf)\n # Save predictions and actuals to GB.csv\n save_results(optimal_window, predictions, actuals)\n\nif clf_name == 'RFR':\n print('Analysis for Random Forest Regression\\n -------------------------------------')\n optimal_window = 7\n clf = ensemble.RandomForestRegressor(n_estimators=100)\n look_ahead = 7\n\n # Find optimal window size for RFR\n RFRall_iterations = input(\n \"Would you like to iterate over all possible window sizes to find the optimal window size (Y/N)? If so, please note this may take an extensive amount of time. \\n\")\n if RFRall_iterations == 'Y':\n find_optimal_window(clf,277)\n read_windows()\n\n # Run RFR for the chosen optimal window size\n RFRinput = input(\n 'Please indicate Y/N to proceed with running RFR with the optimal window size specified: {} \\n'.format(\n 309 - optimal_window))\n if RFRinput == 'Y':\n accuracy, actuals, preds, mse = model(optimal_window, X, y, clf)\n results(actuals, preds)\n\n # Find optimal look ahead value for RFR\n RFRall_iterations_lookahead = input(\n \"Would you like to iterate over all possible look ahead values to find the optimal look ahead value (Y/N)? If so, please note this may take an extensive amount of time. 
\\n\")\n if RFRall_iterations_lookahead == 'Y':\n find_optimal_lookahead(optimal_window,X,y,clf)\n read_look_aheads()\n\n # Run RF for optimal window size of 302 and optimal look_ahead of 7\n LR_lookinput = input(\n 'Please indicate Y/N to proceed with running GB with the optimal window size and look ahead value: {}, {} \\n'.format(\n 309 - optimal_window, look_ahead))\n if LR_lookinput == 'Y':\n avg_accuracy, predictions, actuals, mse = model_lookahead(optimal_window, X, y, look_ahead, clf)\n # Save predictions and actuals for LR to RF.csv file\n save_results(optimal_window, predictions, actuals)\n\nif clf_name == 'LinR':\n print('Analysis for Linear Regression\\n -------------------------------------')\n optimal_window = 11\n clf = LinearRegression()\n look_ahead = 11\n\n # Find optimal window size for LinR\n Linall_iterations = input(\n \"Would you like to iterate over all possible window sizes to find the optimal window size (Y/N)? If so, please note this may take an extensive amount of time. \\n\")\n if Linall_iterations == 'Y':\n print('Running all iterations of Linear Regression to determine optimal window size...')\n find_optimal_window(clf,277)\n read_windows()\n\n # Run LinR for optimal window size\n Lininput = input(\n 'Please indicate Y/N to proceed with running LinR with the optimal window size specified: {} \\n'.format(\n 309 - optimal_window))\n if Lininput == 'Y':\n print('Running Linear Regression with optimal window size...')\n accuracy, actuals, preds, mse = model(optimal_window, X, y, clf)\n results(actuals, preds)\n\n # Find optimal look ahead value for RFR\n Linall_iterations_lookahead = input(\n \"Would you like to iterate over all possible look ahead values to find the optimal look ahead value (Y/N)? If so, please note this may take an extensive amount of time. 
\\n\")\n if Linall_iterations_lookahead == 'Y':\n print('Running all iterations to find optimal look ahead value...')\n find_optimal_lookahead(optimal_window,X,y,clf)\n read_look_aheads()\n\n # Run RF for optimal window size of 302 and optimal look_ahead of 7\n Lin_lookinput = input(\n 'Please indicate Y/N to proceed with running LinR with the optimal window size and look ahead value: {}, {} \\n'.format(\n 309 - optimal_window, look_ahead))\n if Lin_lookinput == 'Y':\n print('Running Linear Regression with optimal window size and optimal look ahead value...')\n avg_accuracy, predictions, actuals, mse = model_lookahead(optimal_window, X, y, look_ahead, clf)\n # Save predictions and actuals for LR to RF.csv file\n save_results(optimal_window, predictions, actuals)\n", "sub_path": "regression.py", "file_name": "regression.py", "file_ext": "py", "file_size_in_byte": 22255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "warnings.simplefilter", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 45, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 105, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.RFE", "line_number": 106, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 149, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 157, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 164, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 166, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 169, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 173, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 184, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 190, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 190, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 192, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 192, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 199, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 217, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 219, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 225, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 227, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 232, "usage_type": "call"}, 
{"api_name": "csv.reader", "line_number": 234, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 240, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 243, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 251, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 257, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 293, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 294, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 295, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 295, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 297, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 311, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 325, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 326, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 333, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 337, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 351, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 351, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 362, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 362, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 388, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 405, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 405, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 443, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 443, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 480, "usage_type": "call"}]} +{"seq_id": "85055432", "text": "import jsonpickle\nfrom django.core.serializers import serialize\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.views import View\n\nfrom cartapp.cartmanager import SessionCartManager\nfrom userapp.models import UserInfo, Area, Address\nfrom utils.code import gene_code\n\n\nclass RegisterView(View):\n def get(self, request):\n return render(request, 'register.html')\n\n def post(self, request):\n # 获取请求参数\n uname = request.POST.get('account', '')\n pwd = request.POST.get('password', '')\n # 注册用户信息\n try:\n user = UserInfo.objects.get(uname=uname, pwd=pwd)\n return render(request, 'register.html')\n except UserInfo.DoesNotExist:\n user = UserInfo.objects.create(uname=uname, pwd=pwd)\n # 将注册的用户对象存入session中,session中无法直接存入对象,使用jsonpickle序列化成json格式\n request.session['user'] = jsonpickle.dumps(user)\n return redirect('/user/center/')\n\n\ndef 
center_view(request):\n return render(request, 'center.html')\n\n\nclass LoginView(View):\n def get(self, request):\n # Flag which page the login came from, so we can jump back to the cart page after login\n reflag = request.GET.get('reflag', '')\n return render(request, 'login.html', {'reflag': reflag})\n\n def post(self, request):\n # Get the request parameters\n uname = request.POST.get('account', '')\n pwd = request.POST.get('password', '')\n\n # Get the post-login redirect flag: whether this login came from the cart page\n reflag = request.POST.get('reflag', '')\n\n cartitems = request.POST.get('cartitems', '')\n\n totalPrice = request.POST.get('totalPrice', '')\n # Check whether the login succeeded\n user = UserInfo.objects.filter(uname=uname, pwd=pwd)\n if user:\n request.session['user'] = jsonpickle.dumps(user[0])\n\n # Persist the cart items from the session to the database\n SessionCartManager(request.session).migrateSession2DB()\n\n if reflag == 'cart':\n return redirect('/cart/queryAll/')\n elif reflag == 'order':\n return redirect('/order/?cartitems=' + cartitems + '&totalPrice=' + totalPrice)\n\n return redirect('/user/center/')\n\n return redirect('/user/login/')\n\n\nclass LoadCodeView(View):\n def get(self, request):\n # Get the image captcha\n img, code = gene_code()\n request.session['session_code'] = code\n return HttpResponse(img, content_type='image/png') # content_type specifies how the image is rendered\n\n\nclass CheckCodeView(View):\n def get(self, request):\n # Get the request parameters\n code = request.GET.get('code', -1)\n # Get the captcha that was stored in the session\n session_code = request.session.get('session_code', '')\n # Check whether they match\n vflag = False\n if code == session_code:\n vflag = True\n # Return the response\n return JsonResponse({'vflag': vflag})\n\n\nclass LogoutView(View):\n def post(self, request):\n # Clear all data from the session\n request.session.flush()\n # Return the response\n return JsonResponse({'logout': True})\n\n\nclass AddressView(View):\n def get(self, request):\n # Get the shipping addresses of the currently logged-in user\n # Get the currently logged-in user object\n user_str = request.session.get('user', '')\n if user_str:\n # Deserialize the session data back into an object\n user = jsonpickle.loads(user_str)\n addr_list = user.address_set.all()\n return render(request, 'address.html', {'addr_list': addr_list})\n\n def post(self, request):\n # Get the request parameters\n aname = request.POST.get('aname', '')\n aphone = request.POST.get('aphone', '')\n addr = request.POST.get('addr', '')\n # Get the currently logged-in user object\n user_str = request.session.get('user', '')\n if user_str:\n # Deserialize the session data back into an object\n user = jsonpickle.loads(user_str)\n\n # Insert into the database table\n # From the 'one' side model, reach the 'many' side via '<lowercase model name>_set'\n # For the lambda, see the OneNote notes\n Address.objects.create(aname=aname, aphone=aphone, addr=addr, userinfo=user,\n isdefault=(lambda count: True if count == 0 else False)(user.address_set.count()))\n return redirect('/user/address/')\n\n\ndef load_area_view(request):\n # Get the request parameters\n pid = request.GET.get('pid', -1)\n pid = int(pid)\n area_list = Area.objects.filter(parentid=pid)\n # Serialize the data\n jarea_list = serialize('json', area_list)\n return JsonResponse({'jarea_list': jarea_list})\n\n\ndef update_default_addr_view(request):\n # Get the request parameters\n addr_id = request.GET.get('addrid', -1)\n addr_id = int(addr_id)\n # Update the data\n Address.objects.filter(id=addr_id).update(isdefault=True)\n Address.objects.exclude(id=addr_id).update(isdefault=False)\n return redirect('/user/address/')\n\n\ndef del_addr_view(request):\n # Get the request parameters\n addr_id = request.GET.get('addrid', -1)\n addr_id = int(addr_id)\n # Delete the data\n Address.objects.get(id=addr_id).delete()\n return redirect('/user/address/')\n", "sub_path": "userapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5372, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "django.views.View", "line_number": 14, 
"usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "userapp.models.UserInfo.objects.get", "line_number": 24, "usage_type": "call"}, {"api_name": "userapp.models.UserInfo.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "userapp.models.UserInfo", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "userapp.models.UserInfo.DoesNotExist", "line_number": 26, "usage_type": "attribute"}, {"api_name": "userapp.models.UserInfo", "line_number": 26, "usage_type": "name"}, {"api_name": "userapp.models.UserInfo.objects.create", "line_number": 27, "usage_type": "call"}, {"api_name": "userapp.models.UserInfo.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "userapp.models.UserInfo", "line_number": 27, "usage_type": "name"}, {"api_name": "jsonpickle.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}, {"api_name": "userapp.models.UserInfo.objects.filter", "line_number": 55, "usage_type": "call"}, {"api_name": "userapp.models.UserInfo.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "userapp.models.UserInfo", "line_number": 55, "usage_type": "name"}, {"api_name": "jsonpickle.dumps", "line_number": 57, "usage_type": "call"}, {"api_name": "cartapp.cartmanager.SessionCartManager", "line_number": 60, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 69, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 72, "usage_type": "name"}, {"api_name": "utils.code.gene_code", "line_number": 75, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 77, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 80, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 91, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 94, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 99, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 102, "usage_type": "name"}, {"api_name": "jsonpickle.loads", "line_number": 109, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 111, "usage_type": "call"}, {"api_name": "jsonpickle.loads", "line_number": 122, "usage_type": "call"}, {"api_name": "userapp.models.Address.objects.create", "line_number": 127, "usage_type": "call"}, {"api_name": "userapp.models.Address.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "userapp.models.Address", "line_number": 127, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 129, "usage_type": "call"}, {"api_name": "userapp.models.Area.objects.filter", "line_number": 136, "usage_type": "call"}, {"api_name": "userapp.models.Area.objects", "line_number": 136, "usage_type": "attribute"}, {"api_name": "userapp.models.Area", 
"line_number": 136, "usage_type": "name"}, {"api_name": "django.core.serializers.serialize", "line_number": 138, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 139, "usage_type": "call"}, {"api_name": "userapp.models.Address.objects.filter", "line_number": 147, "usage_type": "call"}, {"api_name": "userapp.models.Address.objects", "line_number": 147, "usage_type": "attribute"}, {"api_name": "userapp.models.Address", "line_number": 147, "usage_type": "name"}, {"api_name": "userapp.models.Address.objects.exclude", "line_number": 148, "usage_type": "call"}, {"api_name": "userapp.models.Address.objects", "line_number": 148, "usage_type": "attribute"}, {"api_name": "userapp.models.Address", "line_number": 148, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 149, "usage_type": "call"}, {"api_name": "userapp.models.Address.objects.get", "line_number": 157, "usage_type": "call"}, {"api_name": "userapp.models.Address.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "userapp.models.Address", "line_number": 157, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "598618294", "text": "#!/usr/bin/env python3\n# sutimar pengpinij\n# 590510137\n# Lab 09\n# Problem 3\n# 204113 Sec 001\n\nimport time\nimport datetime\nimport random\n\ndef main():\n today = datetime.datetime.now()\n x = int(today.microsecond)\n rand_a1 = qrand_dice(x)\n rand_a2 = qrand_dice(rand_a1)\n rand_b1 = qrand_dice(rand_a2)\n rand_b2 = qrand_dice(rand_b1)\n \n \n sum_a = rand_a1 + rand_a2\n draw_player(\"A\")\n drawing(rand_a1, rand_a2, sum_a)\n\n sum_b = rand_b1 + rand_b2 \n draw_player(\"B\")\n drawing(rand_b1, rand_b2, sum_b)\n\n\n if abs(sum_a - 8) < abs(sum_b - 8):\n result = \"A won !!\"\n elif abs(sum_a - 8) == abs(sum_b - 8):\n result = \"cat game \"\n else:\n result = \"B won !!\"\n\n for i in range (3):\n for j in range (32):\n if i==2 :\n print(\"-------------END----------------\")\n break\n else:\n print(\"-\",end=\"\")\n print()\n\n finish(result)\n\ndef choosing_index(ls):\n num = random.randrange(0, len(ls))\n return num\n\ndef drawing(score_1, score_2, sum_score):\n pic = \"\"\"_________________________________\n| | |\n| | |\n| {0} | {1} |\n| | |\n| | | \n|_______________|_______________|\"\"\".format(score_1, score_2)\n\n print(pic)\n print()\n print(\"sum score : {0}\".format(score_1 + score_2))\n print()\n\ndef draw_player(key):\n print(\"-----///// PLAYER : {0} /////-----\".format(key))\n\ndef finish(key):\n print(\"------->>> {0} <<<-------\".format(key))\n\ndef qrand_dice(x):\n c = 1\n a = 186\n b = 7\n m = 6\n xi = (((a * (x**2)) + (b*x) + c) % m)\n xi += 1\n\n return xi\n\n\nif __name__ == \"__main__\":\n main()", "sub_path": "204113/Lab09/qrand_dice.py", "file_name": "qrand_dice.py", "file_ext": "py", "file_size_in_byte": 1784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "datetime.datetime.now", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "463818283", "text": "# coding:gbk\n# Required Packages\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n#href:https://ryancheunggit.gitbooks.io/calculus-with-python/content/08NewtonIterMethod.html\nf = lambda x: x ** 2 - 2 * x - 4\nl1 = lambda x: 2 * x - 8\nl2 = lambda x: 
6 * x - 20\n\nx = np.linspace(0, 5, 100)\n\nplt.plot(x, f(x), 'black')\nplt.plot(x[30:80], l1(x[30:80]), 'blue', linestyle='--')\nplt.plot(x[66:], l2(x[66:]), 'blue', linestyle='--')\n\nl = plt.axhline(y=0, xmin=0, xmax=1, color='black')\nl = plt.axvline(x=2, ymin=2.0 / 18, ymax=6.0 / 18, linestyle='--')\nl = plt.axvline(x=4, ymin=6.0 / 18, ymax=10.0 / 18, linestyle='--')\n\nplt.text(1.9, 0.5, r\"$x_0$\", fontsize=18)\nplt.text(3.9, -1.5, r\"$x_1$\", fontsize=18)\nplt.text(3.1, 1.3, r\"$x_2$\", fontsize=18)\n\nplt.plot(2, 0, marker='o', color='r')\nplt.plot(2, -4, marker='o', color='r')\nplt.plot(4, 0, marker='o', color='r')\nplt.plot(4, 4, marker='o', color='r')\nplt.plot(10.0 / 3, 0, marker='o', color='r')\n\nplt.show()\n", "sub_path": "p20160515/pNewton.py", "file_name": "pNewton.py", "file_ext": "py", "file_size_in_byte": 956, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "numpy.linspace", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "372600398", "text": "# if you are putting your test script folders under {git project folder}/tests/, it will work fine.\n# otherwise, you either add 
it to the system path before you run, or hard-code it here.\n\nimport sys\n\nINPUT_LIB_PATH = sys.argv[1]\nsys.path.append(INPUT_LIB_PATH)\n\nimport os\nimport common\nimport basecase\nimport gsheet\n\nimport shutil\nimport browser\nimport time\n\n\nclass Case(basecase.SikuliInputLatencyCase):\n\n def run(self):\n # Disable Sikuli action and info log\n com = common.General()\n com.infolog_enable(False)\n com.set_mouse_delay(0)\n\n # Prepare\n app = gsheet.gSheet()\n sample1_file_path = os.path.join(self.INPUT_IMG_SAMPLE_DIR_PATH, self.INPUT_IMG_OUTPUT_SAMPLE_1_NAME)\n sample1_file_path = sample1_file_path.replace(os.path.splitext(sample1_file_path)[1], '.png')\n capture_width = int(self.INPUT_RECORD_WIDTH)\n capture_height = int(self.INPUT_RECORD_HEIGHT)\n\n # Launch browser\n my_browser = browser.Firefox()\n\n # Access link and wait\n my_browser.clickBar()\n my_browser.enterLink(self.INPUT_TEST_TARGET)\n app.wait_for_loaded()\n\n # Wait for stable\n sleep(2)\n\n # PRE ACTIONS\n\n # Customized Region\n customized_region_name = 'end'\n type_area = self.find_match_region(app.GSHEET_TAB_IDENTIFIER, similarity=0.75)\n modified_area = self.tuning_region(type_area, x_offset=-70, y_offset=-50, w_offset=200, h_offset=50)\n self.set_override_region_settings(customized_region_name, modified_area)\n\n # Record T1, and capture the snapshot image\n # Input Latency Action\n loc, screenshot, t1 = app.click_2nd_tab(capture_width, capture_height)\n\n # In normal conditions the response should appear within 100 ms,\n # but if lag happens it may only show up after 100 ms,\n # and that would make the calculated AIL much smaller than expected.\n sleep(0.1)\n\n # Record T2\n t2 = time.time()\n\n # POST ACTIONS\n\n # Write timestamp\n com.updateJson({'t1': t1, 't2': t2}, self.INPUT_TIMESTAMP_FILE_PATH)\n\n # Write the snapshot image\n shutil.move(screenshot, sample1_file_path)\n\ncase = Case(sys.argv)\ncase.run()\n", "sub_path": "tests/regression/gsheet/test_firefox_gsheet_ail_clicktab_0.sikuli/test_firefox_gsheet_ail_clicktab_0.py", "file_name": "test_firefox_gsheet_ail_clicktab_0.py", "file_ext": "py", "file_size_in_byte": 2238, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "basecase.SikuliInputLatencyCase", "line_number": 19, "usage_type": "attribute"}, {"api_name": "common.General", "line_number": 23, "usage_type": "call"}, {"api_name": "gsheet.gSheet", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "browser.Firefox", "line_number": 35, "usage_type": "call"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "524445525", "text": "# https://pyyaml.org/wiki/PyYAMLDocumentation\n\nimport sys\nimport os\nimport yaml\nfrom yaml_def import Person\n\nos.makedirs(\"output\", exist_ok=True)\n\nfilename=\"output/person.yaml\" \n\n\ndef dump(filename=filename):\n obj_list = [Person(\"Jon Smith\", 20, \"123 E 1st St\", [\"408-123-4567\", \"480-987-6543\"], [\"jsmith@abc.com\"]),\n Person(\"Tim Martis\", 31, \"456 S 2nd Ave\", [\"650-333-4123\"], [\"tmar@nba.com\", \"tmar@tw.net\"]),\n Person(\"Vim Lux\", 42, \"9870 W Oak Ln\", [\"928-234-2323\", 
\"602-354-7901\"], [\"vlux@ligga.com\"]),\n ]\n obj_str = yaml.dump(obj_list)\n with open(filename, \"w\") as f:\n f.write(obj_str)\n print(\"dump completed at %s\" % filename)\n\n\ndef load(filename=filename):\n with open(filename, \"r\") as f:\n lines = f.readlines()\n obj_str = \"\"\n for line in lines:\n obj_str += line\n print(\"obj_str: \", obj_str)\n obj = yaml.load(obj_str, yaml.FullLoader) # okay - list of objects\n #obj = yaml.load(obj_str, yaml.UnsafeLoader) # okay - list of objects\n #obj = yaml.load(obj_str, yaml.BaseLoader) # a dictionary can have extra fields not defined or less fields\n print(type(obj), obj)\n\n\nif __name__ == \"__main__\":\n if sys.argv[1] == '1':\n dump()\n else:\n load()\n", "sub_path": "python/yaml/load_n_save_yaml_def.py", "file_name": "load_n_save_yaml_def.py", "file_ext": "py", "file_size_in_byte": 1289, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "os.makedirs", "line_number": 9, "usage_type": "call"}, {"api_name": "yaml_def.Person", "line_number": 15, "usage_type": "call"}, {"api_name": "yaml_def.Person", "line_number": 16, "usage_type": "call"}, {"api_name": "yaml_def.Person", "line_number": 17, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 29, "usage_type": "name"}, {"api_name": "cv2.line", "line_number": 30, "usage_type": "name"}, {"api_name": "yaml.load", "line_number": 32, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}]} +{"seq_id": "609069448", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nA representation of a HAR Request.\n\"\"\"\n\nimport enum\nfrom datetime import datetime\nfrom typing import Iterator, NamedTuple, List, Optional\nfrom urllib.parse import urlparse, SplitResult\n\nimport pendulum\n\nfrom transformer.naming import to_identifier\n\n\nclass HttpMethod(enum.Enum):\n \"\"\"\n Enumeration of HTTP method types.\n \"\"\"\n\n GET = enum.auto()\n POST = enum.auto()\n PUT = enum.auto()\n OPTIONS = enum.auto()\n DELETE = enum.auto()\n\n\nclass Header(NamedTuple):\n \"\"\"\n HTTP header as recorded in HAR file.\n \"\"\"\n\n name: str\n value: str\n\n\nclass QueryPair(NamedTuple):\n \"\"\"\n Query String as recorded in HAR file.\n \"\"\"\n\n name: str\n value: str\n\n\nclass Request(NamedTuple):\n \"\"\"\n An HTTP request as recorded in a HAR file.\n \"\"\"\n\n timestamp: datetime\n method: HttpMethod\n url: SplitResult\n headers: List[Header]\n post_data: dict\n query: List[QueryPair]\n name: Optional[str] = None\n\n @classmethod\n def from_har_entry(cls, entry: dict) -> \"Request\":\n \"\"\"\n Creates a request from a HAR entry.\n \"\"\"\n\n request = entry[\"request\"]\n return Request(\n timestamp=pendulum.parse(entry[\"startedDateTime\"]),\n method=HttpMethod[request[\"method\"]],\n url=urlparse(request[\"url\"]),\n name=None,\n headers=[\n Header(name=d[\"name\"], value=d[\"value\"])\n for d in request.get(\"headers\", [])\n ],\n post_data=request.get(\"postData\", {}),\n query=[\n QueryPair(name=d[\"name\"], value=d[\"value\"])\n for d in request.get(\"queryString\", [])\n ],\n )\n\n @classmethod\n def all_from_har(cls, har: dict) -> Iterator[\"Request\"]:\n \"\"\"\n Generates requests for all entries in a given HAR file.\n \"\"\"\n\n for entry in har[\"log\"][\"entries\"]:\n yield cls.from_har_entry(entry)\n\n def task_name(self) -> str:\n \"\"\"\n 
Generates a simple name suitable for use as a Python function.\n \"\"\"\n\n return \"_\".join(\n (\n self.method.name,\n self.url.scheme,\n to_identifier(self.url.hostname),\n to_identifier(self.url.path),\n str(abs(hash(self))),\n )\n )\n\n def __hash__(self) -> int:\n return hash(\n (\n self.timestamp,\n self.method,\n self.url,\n tuple(self.post_data) if self.post_data else None,\n )\n )\n", "sub_path": "transformer/request.py", "file_name": "request.py", "file_ext": "py", "file_size_in_byte": 2611, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "enum.Enum", "line_number": 16, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 21, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 22, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 23, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 24, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.NamedTuple", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.NamedTuple", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.NamedTuple", "line_number": 46, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "name"}, {"api_name": "urllib.parse.SplitResult", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 57, "usage_type": "name"}, {"api_name": "pendulum.parse", "line_number": 67, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 69, "usage_type": "call"}, {"api_name": "typing.Iterator", "line_number": 83, "usage_type": "name"}, {"api_name": "transformer.naming.to_identifier", "line_number": 100, "usage_type": "call"}, {"api_name": "transformer.naming.to_identifier", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "195339622", "text": "# coding: utf-8\nimport numpy as np\nfrom cv2 import cv2\nimport matplotlib.pyplot as plt\n\nimg1 = cv2.imread(r'pictures\\star.jpg', 0)\nimg2 = cv2.imread(r'pictures\\star2.jpg', 0)\nimg3 = cv2.imread(r'pictures\\rectangle1.jpg', 0)\n\nret, thresh = cv2.threshold(img1, 127, 255, 0)\nret, thresh2 = cv2.threshold(img2, 127, 255, 0)\nret, thresh3 = cv2.threshold(img3, 127, 255, 0)\ncontours, hierarchy = cv2.findContours(thresh, 2, 1)\ncnt1 = contours[0]\ncontours, hierarchy = cv2.findContours(thresh2, 2, 1)\ncnt2 = contours[0]\ncontours, hierarchy = cv2.findContours(thresh3, 2, 1)\ncnt3 = contours[0]\n\nret0 = cv2.matchShapes(cnt1, cnt1, 1, 0.0)\nret1 = cv2.matchShapes(cnt1, cnt2, 1, 0.0)\nret2 = cv2.matchShapes(cnt1, cnt3, 1, 0.0)\nprint(ret0, ret1, ret2)\n", "sub_path": "4.9.5-contours_match_shape.py", "file_name": "4.9.5-contours_match_shape.py", "file_ext": "py", "file_size_in_byte": 740, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "cv2.cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 6, "usage_type": "name"}, {"api_name": "cv2.cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 7, "usage_type": "name"}, {"api_name": "cv2.cv2.imread", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 8, "usage_type": "name"}, {"api_name": "cv2.cv2.threshold", "line_number": 
10, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 10, "usage_type": "name"}, {"api_name": "cv2.cv2.threshold", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 11, "usage_type": "name"}, {"api_name": "cv2.cv2.threshold", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 12, "usage_type": "name"}, {"api_name": "cv2.cv2.findContours", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 13, "usage_type": "name"}, {"api_name": "cv2.cv2.findContours", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 15, "usage_type": "name"}, {"api_name": "cv2.cv2.findContours", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 17, "usage_type": "name"}, {"api_name": "cv2.cv2.matchShapes", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 20, "usage_type": "name"}, {"api_name": "cv2.cv2.matchShapes", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 21, "usage_type": "name"}, {"api_name": "cv2.cv2.matchShapes", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "122264895", "text": "#!/usr/bin/env python3\n\nimport os, subprocess, re, datetime\n\nclass Event:\n def __init__(self, title, diff, ongoing, time, url):\n self.title = title\n self.title_cut = self.title[:100].strip()\n self.diff = diff\n self.human_diff = ':'.join(str(self.diff).split(':')[:-1])\n self.ongoing = ongoing\n self.time = time\n self.url = url\n if self.ongoing:\n self.human_str = f\"{self.title_cut} {self.human_diff} left\"\n else:\n self.human_str = f\"{self.title_cut} in {self.human_diff}\"\n\n def __repr__(self):\n return f\"Event(title: {self.title}, diff: {self.diff}, ongoing: {self.ongoing}, time: {self.time}, url: {self.url})\"\n\ndef get_events():\n datetime_format = '%d %b %Y %H:%M'\n now = datetime.datetime.now()\n url_pattern = r'\\b((?:https?://)?(?:(?:www\\.)?(?:[\\da-z\\.-]+)\\.(?:[a-z]{2,6})|(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])))(?::[0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])?(?:/[\\w\\.-]*)*/?)\\b'\n\n cmd = \"icalBuddy -n -nc -nrd -npn -ea -ps '/|/' -nnr '' -b '' -ab '' -iep 'title,notes,datetime' eventsToday+1\"\n output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]\n lines = output.decode('utf-8').strip().split('\\n')\n\n events = []\n if lines == ['']: return events\n\n for line in lines:\n splat = line.split('|')\n title = splat[0]\n\n urls = re.findall(url_pattern, splat[1])\n url = (urls + [None])[0]\n if url is not None and 'meet' in url: url += 
'?authuser=l.kurpiewski@codeandpepper.com'\n\n timerange = splat[-1].replace('at ', '')\n starttime, endtime = timerange.split(' - ')\n endtime = datetime.datetime.strptime(starttime[:-5] + endtime, datetime_format)\n starttime = datetime.datetime.strptime(starttime, datetime_format)\n\n ongoing = starttime <= now <= endtime\n if ongoing:\n diff = endtime-now\n else:\n diff = starttime-now\n\n time = ' '.join(timerange.split()[3:])\n\n events.append(Event(title, diff, ongoing, time, url))\n return events\n\ndef generate_main_text(events):\n next_event_text = ' 􀄧 ' + events[1].human_str if (len(events) > 1 and events[0].ongoing) else ''\n return events[0].human_str + next_event_text\n\ndef plugin_undraw():\n args = [\n '--set upcoming drawing=off',\n # '--set \"seperator_upcoming\" drawing=off',\n ]\n os.system('sketchybar -m ' + ' '.join(args))\n\ndef plugin_draw(main_text, popup_items):\n args = [\n f'--set upcoming label=\"{main_text}\"',\n '--set upcoming drawing=on',\n # '--set \"seperator_upcoming\" drawing=on',\n ]\n\n for i,item in enumerate(popup_items):\n args += [\n f'--add item upcoming.{i} popup.upcoming',\n f'--set upcoming.{i} background.padding_left=10',\n f'--set upcoming.{i} background.padding_right=10',\n f'--set upcoming.{i} label=\"{item[\"text\"]}\"',\n f'--set upcoming.{i} click_script=\"open {item[\"url\"]} ; sketchybar -m --set upcoming popup.drawing=off\"'\n ]\n\n print(args)\n os.system('sketchybar -m ' + ' '.join(args))\n\nif __name__ == '__main__':\n events = get_events()\n if len(events) == 0:\n plugin_undraw()\n else:\n main_text = generate_main_text(events)\n plugin_draw(main_text, ({'text': e.human_str, 'url': e.url} for e in events))\n", "sub_path": "sketchybar/.config/sketchybar/backup_theme_1/plugins/upcoming.py", "file_name": "upcoming.py", "file_ext": "py", "file_size_in_byte": 4100, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 28, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 67, "usage_type": "call"}, {"api_name": "os.system", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "634969193", "text": "import re\r\nimport sqlite3\r\nimport datetime\r\nimport sys\r\nfrom scipy import spatial\r\n\r\n\r\nclass Command(object):\r\n\tdef __init__(self):\r\n\t\tself.commandREGEX = re.compile('^.*?(?=\\s)')\r\n\t\tself.parameterREGEX = re.compile('(?=^.*?\\s(.*))')\r\n\t\tself.commands = { \r\n\t\t\t\"addbook\" : self.addBook,\r\n\t\t\t\"showlibrary\" : self.showLibrary,\r\n\t\t\t\"editbook\" : self.editBook,\r\n\t\t\t\"checkout\" : self.checkoutBook,\r\n\t\t\t\"return\" : self.returnBook,\r\n\t\t\t\"removebook\" : self.removeBook,\r\n\t\t\t\"help\" : self.help\r\n\t\t}\r\n\r\n\tdef handle_command(self, user, input, slackClient):\r\n\t\tresponse = \"<@\" + user + \">: \\n\"\r\n\t\tcommand = 
\"\"\r\n\t\tparameters = \"\"\r\n\t\tself.slackCLient = slackClient\r\n\t\tself.user = user\r\n\r\n\t\tif(len(sys.argv)>1):\r\n\t\t\targuments = sys.argv[1]\r\n\t\t\tif (arguments == \"fixLibraryUsers\"):\r\n\t\t\t\tself.fixLibraryUsers()\r\n\t\t\t\texit()\r\n\r\n\t\t#Collect command\r\n\t\tif(self.commandREGEX.search(input)):\r\n\t\t\tcommand = self.commandREGEX.match(input).group()\r\n\r\n\t\t#Collect parameters\r\n\t\tif(self.parameterREGEX.search(input)):\r\n\t\t\tparameters = self.parameterREGEX.match(input).group(1)\r\n\r\n\t\t#this condition is helpful for commands that don't have parameters.\r\n\t\tif(command == \"\"):\r\n\t\t\tcommand=input;\r\n\r\n\t\t#COMMAND NOT IN COMMAND LIST\r\n\t\tif command in self.commands:\r\n\t\t\tresponse += self.commands[command](parameters)\r\n\t\telse:\r\n\t\t\tresponse += \"Sorry I don't understand the command: \" + command + \". \" + self.help(\"\")\r\n\r\n\t\t#RESET USER\r\n\t\tself.user=\"\"\r\n\t\treturn response\r\n\t\t\r\n\tdef addBook(self, parameters):\r\n\t\ttitle = parameters\r\n\t\tauthor = \"N/A\"\r\n\t\towner = self.user\r\n\t\tresult = \"BOOK ADDED\"\r\n\r\n\t\t#SQLLITE CONNECTION\r\n\t\tlibrary = sqlite3.connect('library')\r\n\r\n\t\t#PARAMETER PARSING\r\n\t\ttitle = title.replace(\"\\\"\", \"\")\r\n\r\n\t\t#SQL MAGIC\r\n\t\ttry:\r\n\t\t\tlibrary.execute('''INSERT INTO books(title, author, owner, checkedOutBy, checkoutDate)\r\n\t\t \t\t VALUES(?,?,?,?,?)''', (title, author, owner, \"Available\", \"\"))\r\n\t\t\tlibrary.commit()\r\n\t\t\tlibrary.close()\r\n\t\texcept:\r\n\t\t\tresult = \"There was an error while processing your request, please try again.\"\r\n\r\n\t\t#LOGGING\r\n\t\tLibraryLog = open(\"LibraryLog.log\", \"a\")\r\n\t\tLibraryLog.writelines(datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\") + \" : \" + self.get_user_name(self.user)+\" ADDED BOOK \" + title + \"\\n\")\r\n\t\tLibraryLog.close()\r\n\t\treturn result\r\n\r\n\tdef showLibrary(self, parameters ):\r\n\t\tresult=\"\"\r\n\t\t#SQLLITE CONNECTION\r\n\t\tlibrary = sqlite3.connect('library')\r\n\t\tlibraryCurser = library.cursor()\r\n\r\n\t\t#SQL MAGIC\r\n\t\ttry:\r\n\t\t\tlibraryCurser.execute('''SELECT title, author, owner, checkedOutBy, checkoutDate FROM books''')\r\n\t\texcept:\r\n\t\t\treturn \"There was an error while processing your request, please try again.\"\r\n\t\t#DISPLAY QUERY\r\n\t\tcounter=0\r\n\t\tfor row in libraryCurser.fetchall():\r\n\t\t\tresult += str(counter) + \". 
Title: \" + row[0] + \" \\n\tAuthor: \" + row[1] + \" \\n\tOwner: \" + self.get_user_name(row[2])+ \" \\n\tChecked Out By: \" + self.get_user_name(row[3]) + \" \\n\tCheckOut Date: \" + row[4] + \"\\n\"\r\n\t\t\tcounter += 1\r\n\t\tlibrary.close()\r\n\t\treturn result\r\n\r\n\tdef editBook(self, parameters):\r\n\t\tfieldREGEX = re.compile('(\\\".*?\\\") (\\\".*?\\\") (\\\".*?\\\")')\r\n\t\tresult = \"EDIT COMPLETE\"\r\n\r\n\t\t#SQLLITE CONNECTION\r\n\t\tlibrary = sqlite3.connect('library')\r\n\t\tlibraryCurser = library.cursor()\r\n\r\n\t\t#PARAMETER PARSING\r\n\t\ttry:\r\n\t\t\ttitle=fieldREGEX.match(parameters).group(1)\r\n\t\t\tfield=fieldREGEX.match(parameters).group(2)\r\n\t\t\tupdate=fieldREGEX.match(parameters).group(3)\r\n\t\texcept:\r\n\t\t\treturn \"Invalid Parameters\\n editbook [\\\"title\\\"] [\\\"field\\\"] [\\\"fieldUpdate\\\"]\\n\"\r\n\t\ttitle = self.cleanInput(title)\r\n\t\tfield = self.cleanInput(field)\r\n\t\tupdate = self.cleanInput(update)\r\n\r\n\t\t#SQL MAGIC\r\n\t\ttry:\r\n\t\t\t# check if the title exists and gather recommendations\r\n\t\t\trecommendations = self.getRecommendations(title)\r\n\r\n\t\t\t# if title exists, checkout the book\r\n\t\t\tif (recommendations == \"MATCH\"):\r\n\t\t\t\tif (field == \"author\"):\r\n\t\t\t\t\tlibraryCurser.execute('''UPDATE books SET author = ? WHERE title = ? ''', (update, title))\r\n\t\t\t\telif (field == \"owner\"):\r\n\t\t\t\t\tlibraryCurser.execute('''UPDATE books SET owner = ? WHERE title = ? ''',\r\n\t\t\t\t\t\t\t\t\t\t (self.get_user_ID(update), title))\r\n\t\t\t\telif (field == \"title\"):\r\n\t\t\t\t\tlibraryCurser.execute('''UPDATE books SET title = ? WHERE title = ? ''', (update, title))\r\n\r\n\t\t\t# give a recommendation if title does not exists\r\n\t\t\telse:\r\n\t\t\t\tresult = recommendations\r\n\r\n\t\t\tlibrary.commit()\r\n\t\t\tlibrary.close()\r\n\t\texcept:\r\n\t\t\tresult = \"There was an error Processing your request, please try again\"\r\n\r\n\t\t#LOGGING\r\n\t\tLibraryLog = open(\"LibraryLog.log\", \"a\")\r\n\t\tLibraryLog.writelines(datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\") + \" : \" + self.get_user_name(self.user)+\" CHANGED FIELD \" + field + \" TO \" + update + \"ON BOOK\" + title + \"\\n\")\r\n\t\tLibraryLog.close()\r\n\t\treturn result\r\n\r\n\tdef checkoutBook(self,parameters):\r\n\t\tfieldREGEX = re.compile('(\\\".*?\\\").*')\r\n\t\tresult = \"Book Checked Out\"\r\n\r\n\t\t#SQLLITE CONNECTION\r\n\t\tlibrary = sqlite3.connect('library')\r\n\t\tlibraryCurser = library.cursor()\r\n\r\n\t\t#PARAMETER PARSING\r\n\t\ttry:\r\n\t\t\ttitle = fieldREGEX.match(parameters).group(1)\r\n\t\texcept:\r\n\t\t\treturn \"Invalid Parameters\\n checkout [\\\"title\\\"]\\n\"\r\n\t\ttitle = title.replace(\"\\\"\", \"\")\r\n\r\n\t\t#SQL MAGIC\r\n\t\ttry:\r\n\t\t\t#check if the title exists and gather recommendations\r\n\t\t\trecommendations = self.getRecommendations(title)\r\n\r\n\t\t\t# if title exists, checkout the book\r\n\t\t\tif(recommendations == \"MATCH\"):\r\n\t\t\t\tlibraryCurser.execute('''UPDATE books SET checkedOutBy = ? WHERE title = ? ''',\r\n\t\t\t\t\t\t\t\t\t (self.user, title))\r\n\t\t\t\tlibraryCurser.execute('''UPDATE books SET checkoutDate= ? WHERE title = ? 
''',\r\n\t\t\t\t\t\t\t\t\t (datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\"), title))\r\n\t\t\t\tlibrary.commit()\r\n\t\t\t\tlibrary.close()\r\n\r\n\t\t\t\t# LOGGING\r\n\t\t\t\tLibraryLog = open(\"LibraryLog.log\", \"a\")\r\n\t\t\t\tLibraryLog.writelines(datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\") + \" : \" + self.get_user_name(self.user) + \" CHECKED OUT \" + title + \"\\n\")\r\n\t\t\t\tLibraryLog.close()\r\n\r\n\t\t\t# give a recommendation if the title does not exist\r\n\t\t\telse:\r\n\t\t\t\tresult = recommendations\r\n\t\texcept:\r\n\t\t\tresult = \"There was an error while processing your request, please try again.\"\r\n\r\n\t\treturn result\r\n\r\n\tdef returnBook(self,parameters):\r\n\t\tfieldREGEX = re.compile('(\\\".*?\\\").*')\r\n\t\tresult = \"BOOK RETURNED\"\r\n\r\n\t\t#SQLITE CONNECTION\r\n\t\tlibrary = sqlite3.connect('library')\r\n\t\tlibraryCurser = library.cursor()\r\n\r\n\t\t#PARAMETER PARSING\r\n\t\ttry:\r\n\t\t\ttitle = fieldREGEX.match(parameters).group(1)\r\n\t\texcept:\r\n\t\t\treturn \"Invalid Parameters\\n return [\\\"title\\\"]\\n\"\r\n\t\ttitle = title.replace(\"\\\"\", \"\")\r\n\r\n\t\t#SQL MAGIC\r\n\t\ttry:\r\n\t\t\t# check if the title exists and gather recommendations\r\n\t\t\trecommendations = self.getRecommendations(title)\r\n\r\n\t\t\t# if the title exists, return the book\r\n\t\t\tif (recommendations == \"MATCH\"):\r\n\t\t\t\tlibraryCurser.execute('''UPDATE books SET checkedOutBy = ? WHERE title = ? ''', (\"Available\", title))\r\n\t\t\t\tlibraryCurser.execute('''UPDATE books SET checkoutDate= ? WHERE title = ? ''', (\"\", title))\r\n\t\t\t\tlibrary.commit()\r\n\t\t\t\tlibrary.close()\r\n\r\n\t\t\t\t# LOGGING\r\n\t\t\t\tLibraryLog = open(\"LibraryLog.log\", \"a\")\r\n\t\t\t\tLibraryLog.writelines(\r\n\t\t\t\tdatetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\") + \" : \" + self.get_user_name(self.user) + \" RETURNED \" + title + \"\\n\")\r\n\t\t\t\tLibraryLog.close()\r\n\r\n\t\t\t# give a recommendation if the title does not exist\r\n\t\t\telse:\r\n\t\t\t\tresult = recommendations\r\n\t\texcept:\r\n\t\t\tresult = \"There was an error while processing your request, please try again.\"\r\n\t\treturn result\r\n\r\n\tdef removeBook(self, parameters):\r\n\t\tfieldREGEX = re.compile('(\\\".*?\\\").*')\r\n\t\tresult = \"Book Has Been Removed From the Library\"\r\n\r\n\t\t#SQLITE CONNECTION\r\n\t\tlibrary = sqlite3.connect('library')\r\n\t\tlibraryCurser = library.cursor()\r\n\r\n\t\t# PARAMETER PARSING\r\n\t\ttry:\r\n\t\t\ttitle = fieldREGEX.match(parameters).group(1)\r\n\t\texcept:\r\n\t\t\treturn \"Invalid Parameters\\n removebook [\\\"title\\\"]\\n\"\r\n\t\ttitle = title.replace(\"\\\"\", \"\")\r\n\t\t# SQL MAGIC\r\n\t\ttry:\r\n\t\t\t# check if the title exists and gather recommendations\r\n\t\t\trecommendations = self.getRecommendations(title)\r\n\r\n\t\t\t# if the title exists, remove the book\r\n\t\t\tif (recommendations == \"MATCH\"):\r\n\t\t\t\tlibraryCurser.execute('''DELETE FROM books WHERE title = ? 
''', (title,))\r\n\t\t\t\tlibrary.commit()\r\n\t\t\t\tlibrary.close()\r\n\r\n\t\t\t\t# LOGGING\r\n\t\t\t\tLibraryLog = open(\"LibraryLog.log\", \"a\")\r\n\t\t\t\tLibraryLog.writelines(datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\") + \" : \" + self.get_user_name(self.user) + \" REMOVED \" + title + \"\\n\")\r\n\t\t\t\tLibraryLog.close()\r\n\r\n\t\t\t# give a recommendation if the title does not exist\r\n\t\t\telse:\r\n\t\t\t\tresult = recommendations\r\n\t\texcept:\r\n\t\t\tresult = \"There was an error while processing your request, please try again.\"\r\n\r\n\t\treturn result\r\n\r\n\tdef help(self, parameters):\r\n\t\tresponse = \"Currently I support the following commands:\\r\\n\"\r\n\t\t\r\n\t\tresponse +=\"addbook [\\\"title\\\"]\\n\" \\\r\n\t\t\t\t \"showlibrary\\n\" \\\r\n\t\t\t\t \"editbook [\\\"title\\\"] [\\\"field\\\"] [\\\"fieldUpdate\\\"]\\n\" \\\r\n\t\t\t\t \"checkout [\\\"title\\\"]\\n\" \\\r\n\t\t\t\t \"return [\\\"title\\\"]\\n\" \\\r\n\t\t\t\t \"removebook [\\\"title\\\"]\\n\" \\\r\n\t\t\t\t \"help\"\r\n\r\n\t\treturn response\r\n\r\n\t#USES THE SLACK API TO CONVERT USER ID TO USERNAME\r\n\tdef get_user_name(self, id):\r\n\t\tif(id == \"Available\"):\r\n\t\t\treturn \"Available\"\r\n\t\tid = self.cleanInput(id)\r\n\t\tapi_call = self.slackCLient.api_call(\"users.list\")\r\n\t\tif api_call.get('ok'):\r\n\t\t\t# retrieve all users so we can find the user\r\n\t\t\tusers = api_call.get('members')\r\n\t\t\tfor user in users:\r\n\t\t\t\tif user.get('id').lower() == id.lower():\r\n\t\t\t\t\treturn \"<@\" + user.get('name') + \">\"\r\n\t\t\treturn id\r\n\r\n\t#USES THE SLACK API TO CONVERT USERNAME TO ID\r\n\tdef get_user_ID(self, name):\r\n\t\tname = self.cleanInput(name)\r\n\t\tapi_call = self.slackCLient.api_call(\"users.list\")\r\n\t\tif api_call.get('ok'):\r\n\t\t\t# retrieve all users so we can find the user\r\n\t\t\tusers = api_call.get('members')\r\n\t\t\tfor user in users:\r\n\t\t\t\tif user.get('name').lower() == name.lower():\r\n\t\t\t\t\treturn user.get('id')\r\n\t\t\treturn name\r\n\r\n\tdef cleanInput(self,input):\r\n\t\tinput = input.replace(\"\\\"\", \"\")\r\n\t\tinput = input.replace(\"@\", \"\")\r\n\t\tinput = input.replace(\"<\", \"\")\r\n\t\tinput = input.replace(\">\", \"\")\r\n\t\tinput = input.strip()\r\n\t\treturn input\r\n\r\n\tdef cosineSimularity(self, item, query):\r\n\t\titem = item.lower()\r\n\t\tquery = query.lower()\r\n\r\n\t\tresult = \"\"\r\n\t\titemsVector = [0] * 128\r\n\t\tqueryVector = [0] * 128\r\n\r\n\t\t#turn the query into a vector\r\n\t\tqueryInChar = list(query)\r\n\t\tfor char in queryInChar:\r\n\t\t\tqueryVector[ord(char)] += 1\r\n\r\n\t\t#turn each item into a vector and compare\r\n\t\titemInChar = list(item)\r\n\t\tfor char in itemInChar:\r\n\t\t\titemsVector[ord(char)] += 1\r\n\t\t#Get the cosine similarity and compare it with the threshold\r\n\t\tcompareResult = 1 - spatial.distance.cosine(itemsVector, queryVector) #cosine similarity to compare vectors\r\n\t\tif(compareResult > .80):\r\n\t\t\tresult = item\r\n\t\treturn result\r\n\r\n\tdef getRecommendations(self, title):\r\n\t\t#SQLITE CONNECTION\r\n\t\tlibrary = sqlite3.connect('library')\r\n\t\tlibraryCurser = library.cursor()\r\n\t\trecommendations = []\r\n\r\n\t\t# Check if the title exists and collect recommendations\r\n\t\tlibraryCurser.execute('''SELECT title FROM books''')\r\n\t\tfor row in libraryCurser.fetchall():\r\n\t\t\trecommendation = self.cosineSimularity(row[0], title)\r\n\t\t\tif (recommendation != \"\"):\r\n\t\t\t\trecommendations.append(recommendation)\r\n\t\t\tif 
(title.lower() == row[0].lower()):\r\n\t\t\t\treturn \"MATCH\"\r\n\r\n\t\tif(not recommendations):\r\n\t\t\tresult = \"Could not find title \" + \"\\\"\" + title + \"\\\"\"\r\n\t\telse:\r\n\t\t\tresult = \"Could not find title \" + \"\\\"\" + title + \"\\\"\" + \" did you mean?\\n\"\r\n\t\t\tfor item in recommendations:\r\n\t\t\t\tresult += item + \"\\n\"\r\n\r\n\t\tlibraryCurser.close()\r\n\t\tlibrary.close()\r\n\t\treturn result\r\n\r\n\t#This function was implemented to correct entries in the database that were originally entered as the username instead of user ID\r\n\tdef fixLibraryUsers(self):\r\n\t\tlibrary = sqlite3.connect('library')\r\n\t\tlibraryCurser = library.cursor()\r\n\t\ttry:\r\n\t\t\tlibraryCurser.execute('''SELECT title, author, owner, checkedOutBy, checkoutDate FROM books''')\r\n\t\texcept:\r\n\t\t\treturn \"There was an error while processing your request, please try again.\"\r\n\t\tfor row in libraryCurser.fetchall():\r\n\t\t\tlibraryCurser.execute('''UPDATE books SET owner = ? WHERE title = ? ''', (self.get_user_ID(row[2]), row[0]))\r\n\t\t\tlibraryCurser.execute('''UPDATE books SET checkedOutBy = ? WHERE title = ? ''', (self.get_user_ID(row[3]), row[0]))\r\n\t\tlibraryCurser.close()\r\n\t\tlibrary.commit()\r\n\t\tprint (\"Library Users Updated\")\r\n", "sub_path": "command.py", "file_name": "command.py", "file_ext": "py", "file_size_in_byte": 12308, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "re.compile", "line_number": 10, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 80, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 87, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 104, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 148, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 148, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 153, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 157, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 177, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 177, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 183, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 183, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 195, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 199, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 224, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 224, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 235, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 239, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 261, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 261, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 
337, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 337, "usage_type": "attribute"}, {"api_name": "scipy.spatial", "line_number": 337, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 344, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 370, "usage_type": "call"}]} +{"seq_id": "331453959", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'Martin Pihrt'\n# This plugin read data from probe DHT11 (temp and humi). # Raspberry Pi pin 19 as GPIO 10\n\nimport json\nimport time\nimport traceback\nimport os\nfrom threading import Thread, Event\n\nimport web\n\nfrom ospy.log import log\nfrom plugins import PluginOptions, plugin_url, plugin_data_dir\nfrom ospy.webpages import ProtectedPage\nfrom ospy.helpers import get_rpi_revision\nfrom ospy.helpers import datetime_string\n\nimport RPi.GPIO as GPIO\n\nimport i18n\n\nNAME = 'Air Temperature and Humidity Monitor'\nLINK = 'settings_page'\n\nplugin_options = PluginOptions(\n NAME,\n {'enabled': False,\n 'enable_log': False,\n 'log_interval': 1,\n 'log_records': 0,\n 'label': 'Air Probe'\n }\n)\n\n\n################################################################################\n# Main function loop: #\n################################################################################\n\n\nclass Sender(Thread):\n def __init__(self):\n Thread.__init__(self)\n self.daemon = True\n self._stop = Event()\n\n self.status = {}\n self.status['temp'] = 0\n self.status['humi'] = 0\n\n self._sleep_time = 0\n self.start()\n\n def stop(self):\n self._stop.set()\n\n def update(self):\n self._sleep_time = 0\n\n def _sleep(self, secs):\n self._sleep_time = secs\n while self._sleep_time > 0 and not self._stop.is_set():\n time.sleep(1)\n self._sleep_time -= 1\n\n def run(self):\n Temperature = 0\n Humidity = 0 \n while not self._stop.is_set():\n log.clear(NAME)\n try:\n if plugin_options['enabled']: # if plugin is enabled \n try:\n Temperature, Humidity = DHT11_read_data()\n except:\n self._sleep(0.3) \n \n if Humidity and Temperature != 0:\n self.status['temp'] = Temperature\n self.status['humi'] = Humidity\n log.info(NAME, datetime_string())\n log.info(NAME, _('Temperature') + ': ' + u'%.1f \\u2103' % Temperature)\n log.info(NAME, _('Humidity') + ': ' + u'%.1f' % Humidity + ' %RH')\n\n if plugin_options['enable_log']:\n update_log(self.status)\n\n self._sleep(max(60, plugin_options['log_interval'] * 60))\n\n except Exception:\n log.error(NAME, _('Air Temperature and Humidity Monitor plug-in') + ':\\n' + traceback.format_exc())\n self._sleep(60)\n\nsender = None\n\n################################################################################\n# Helper functions: #\n################################################################################\ndef start():\n global sender\n if sender is None:\n sender = Sender()\n \n\ndef stop():\n global sender\n if sender is not None:\n sender.stop()\n sender.join()\n sender = None \n\ndef bin2dec(string_num):\n return str(int(string_num, 2))\n\ndef DHT11_read_data():\n data = [] \n \n GPIO.setup(19,GPIO.OUT) # pin 19 GPIO10\n GPIO.output(19,True)\n time.sleep(0.025)\n GPIO.output(19,False)\n time.sleep(0.02)\n GPIO.setup(19, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n for i in range(0,500):\n data.append(GPIO.input(19))\n \n bit_count = 0\n tmp = 0\n count = 0\n HumidityBit = \"\"\n TemperatureBit = \"\"\n crc = \"\"\n\n try:\n while data[count] == 1:\n tmp = 1\n count = count + 1\n\n for i in range(0, 32):\n bit_count = 0\n\n while data[count] 
== 0:\n tmp = 1\n count = count + 1\n \n while data[count] == 1:\n bit_count = bit_count + 1\n count = count + 1 \n\n if bit_count > 3:\n if i>=0 and i<8:\n HumidityBit = HumidityBit + \"1\"\n if i>=16 and i<24:\n TemperatureBit = TemperatureBit + \"1\"\n else:\n if i>=0 and i<8:\n HumidityBit = HumidityBit + \"0\"\n if i>=16 and i<24:\n TemperatureBit = TemperatureBit + \"0\"\n \n for i in range(0,8):\n bit_count = 0\n\n while data[count] == 0: \n tmp = 1\n count = count + 1\n\n while data[count] == 1:\n bit_count = bit_count + 1\n count = count + 1\n\n if bit_count > 3:\n crc = crc + \"1\"\n else:\n crc = crc + \"0\"\n\n Humidity = bin2dec(HumidityBit)\n Temperature = bin2dec(TemperatureBit)\n\n if int(Humidity) + int(Temperature) - int(bin2dec(crc)) == 0:\n return int(Temperature),int(Humidity) \n except:\n time.sleep(0.5)\n \n\n \ndef read_log():\n \"\"\"Read log from json file.\"\"\"\n try:\n with open(os.path.join(plugin_data_dir(), 'log.json')) as logf:\n return json.load(logf)\n except IOError:\n return []\n\n\ndef write_log(json_data):\n \"\"\"Write json to log file.\"\"\"\n with open(os.path.join(plugin_data_dir(), 'log.json'), 'w') as outfile:\n json.dump(json_data, outfile)\n\n\ndef update_log(status):\n log_data = read_log()\n data = {'datetime': datetime_string()}\n data['temp'] = str(status['temp'])\n data['humi'] = str(status['humi'])\n log_data.insert(0, data)\n if plugin_options['log_records'] > 0:\n log_data = log_data[:plugin_options['log_records']]\n write_log(log_data)\n\n\n################################################################################\n# Web pages: #\n################################################################################\n\nclass settings_page(ProtectedPage):\n \"\"\"Load an html page for entering adjustments.\"\"\"\n\n def GET(self):\n return self.plugin_render.air_temp_humi(plugin_options, log.events(NAME))\n\n def POST(self):\n plugin_options.web_update(web.input())\n\n if sender is not None:\n sender.update() \n raise web.seeother(plugin_url(settings_page), True)\n\n\nclass settings_json(ProtectedPage):\n \"\"\"Returns plugin settings in JSON format.\"\"\"\n\n def GET(self):\n web.header('Access-Control-Allow-Origin', '*')\n web.header('Content-Type', 'application/json')\n return json.dumps(plugin_options)\n\n\nclass log_json(ProtectedPage):\n \"\"\"Returns plugin settings in JSON format.\"\"\"\n\n def GET(self):\n web.header('Access-Control-Allow-Origin', '*')\n web.header('Content-Type', 'application/json')\n return json.dumps(read_log())\n\n\nclass log_csv(ProtectedPage): # save log file from web as csv file type\n \"\"\"Simple Log API\"\"\"\n\n def GET(self):\n log_records = read_log()\n data = \"Date/Time\"\n data += \";\\t Temperature C\"\n data += \";\\t Humidity %RH\" \n data += '\\n'\n\n for record in log_records:\n data += record['datetime']\n data += \";\\t\" + record[\"temp\"]\n data += \";\\t\" + record[\"humi\"]\n data += '\\n'\n\n web.header('Content-Type', 'text/csv')\n return data\n\n\nclass delete_log_page(ProtectedPage): # delete log file from web\n \"\"\"Delete all log_records\"\"\"\n\n def GET(self):\n write_log([])\n log.info(NAME, _('Deleted log file'))\n raise web.seeother(plugin_url(settings_page), True)\n\n\n", "sub_path": "plugins/air_temp_humi/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 7722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "plugins.PluginOptions", "line_number": 27, "usage_type": 
"call"}, {"api_name": "threading.Thread", "line_number": 43, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", "line_number": 45, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 45, "usage_type": "name"}, {"api_name": "threading.Event", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "ospy.log.log.clear", "line_number": 72, "usage_type": "call"}, {"api_name": "ospy.log.log", "line_number": 72, "usage_type": "name"}, {"api_name": "ospy.log.log.info", "line_number": 83, "usage_type": "call"}, {"api_name": "ospy.log.log", "line_number": 83, "usage_type": "name"}, {"api_name": "ospy.helpers.datetime_string", "line_number": 83, "usage_type": "call"}, {"api_name": "ospy.log.log.info", "line_number": 84, "usage_type": "call"}, {"api_name": "ospy.log.log", "line_number": 84, "usage_type": "name"}, {"api_name": "ospy.log.log.info", "line_number": 85, "usage_type": "call"}, {"api_name": "ospy.log.log", "line_number": 85, "usage_type": "name"}, {"api_name": "ospy.log.log.error", "line_number": 93, "usage_type": "call"}, {"api_name": "ospy.log.log", "line_number": 93, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 93, "usage_type": "call"}, {"api_name": "RPi.GPIO.setup", "line_number": 120, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 120, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 120, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 121, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 121, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 122, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 123, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 123, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 124, "usage_type": "call"}, {"api_name": "RPi.GPIO.setup", "line_number": 125, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 125, "usage_type": "name"}, {"api_name": "RPi.GPIO.IN", "line_number": 125, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PUD_UP", "line_number": 125, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.input", "line_number": 127, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 127, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 192, "usage_type": "call"}, {"api_name": "os.path", "line_number": 192, "usage_type": "attribute"}, {"api_name": "plugins.plugin_data_dir", "line_number": 192, "usage_type": "call"}, {"api_name": "json.load", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path", "line_number": 200, "usage_type": "attribute"}, {"api_name": "plugins.plugin_data_dir", "line_number": 200, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 201, "usage_type": "call"}, {"api_name": "ospy.helpers.datetime_string", "line_number": 206, "usage_type": "call"}, {"api_name": "ospy.webpages.ProtectedPage", "line_number": 219, "usage_type": "name"}, {"api_name": "ospy.log.log.events", "line_number": 223, "usage_type": "call"}, {"api_name": "ospy.log.log", "line_number": 223, "usage_type": "name"}, {"api_name": "web.input", "line_number": 226, "usage_type": "call"}, {"api_name": "web.seeother", "line_number": 230, "usage_type": "call"}, {"api_name": "plugins.plugin_url", "line_number": 
230, "usage_type": "call"}, {"api_name": "ospy.webpages.ProtectedPage", "line_number": 233, "usage_type": "name"}, {"api_name": "web.header", "line_number": 237, "usage_type": "call"}, {"api_name": "web.header", "line_number": 238, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 239, "usage_type": "call"}, {"api_name": "ospy.webpages.ProtectedPage", "line_number": 242, "usage_type": "name"}, {"api_name": "web.header", "line_number": 246, "usage_type": "call"}, {"api_name": "web.header", "line_number": 247, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 248, "usage_type": "call"}, {"api_name": "ospy.webpages.ProtectedPage", "line_number": 251, "usage_type": "name"}, {"api_name": "web.header", "line_number": 267, "usage_type": "call"}, {"api_name": "ospy.webpages.ProtectedPage", "line_number": 271, "usage_type": "name"}, {"api_name": "ospy.log.log.info", "line_number": 276, "usage_type": "call"}, {"api_name": "ospy.log.log", "line_number": 276, "usage_type": "name"}, {"api_name": "web.seeother", "line_number": 277, "usage_type": "call"}, {"api_name": "plugins.plugin_url", "line_number": 277, "usage_type": "call"}]} +{"seq_id": "377077671", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tues Nov 09 2021\n\n@author: Pat Taylor (pt409)\n\"\"\"\n#%% Libraries\nfrom dataclasses import replace\nfrom distutils.log import warn\nfrom typing_extensions import runtime\nimport numpy as np\nimport pandas as pd\nimport dill\nimport warnings\nimport ast\nimport functools\n\nfrom sklearn.metrics import r2_score,mean_squared_error\nfrom scipy.stats import pearsonr\n\nimport sklearn.gaussian_process as gp\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.gaussian_process.kernels import ConstantKernel\nfrom sklearn.decomposition import PCA\nfrom sklearn.utils.optimize import _check_optimize_result\nfrom joblib import Parallel,delayed\nfrom scipy.spatial.distance import pdist, cdist, squareform\nfrom scipy.optimize import minimize\nfrom scipy.linalg import logm,norm,block_diag,inv\nfrom scipy.special import erf,erfc\nfrom ase import data\nfrom ase.build import bulk\n# PyTorch, GPyTorch\nimport torch, gpytorch\n\nimport matplotlib.pyplot as plt\n\nfrom copy import deepcopy,copy\n\nfrom pprint import pprint\n\nfrom shells import shell_radii\n\nv = 2 # global output verbosity\nif v < 3:\n warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)\n warnings.simplefilter(action='ignore', category=RuntimeWarning)\n\n#%% Data processing\ndef check_if_valid(input_,allow_all_null=False):\n \"\"\"\n Helper function to check if database rows are valid to process.\n\n Parameters\n ----------\n input_ (pd.Series) : data series to check.\n allow_all_null (bool) : Whether a full row of null entries (\"-\") is to count as valid.\n\n Return\n ------\n input_ (pd.Series) : float64 version of input_ parameter.\n rtn_code (int) : A return code denoting validity of input. \n \"\"\"\n if np.prod(\"-\" == input_) and not allow_all_null:\n return input_, 0 # Invalid input, all entries are -\n else :\n input_ = np.where(input_ == \"-\",0,input_).astype(np.float64)\n if np.isnan(input_.sum()):\n return input_, 1 # Invalid input, (at least one) empty column\n else :\n return input_, 2 # Valid input\n\ndef get_microstructure_data(df,drop_dupli=False,shuffle_seed=None):\n \"\"\"\n Extract all entries from databse with complete microstructure information. 
\n Here \"complete\" means complete composition + precipitate fraction + phase\n composition data. \n\n Parameters\n ----------\n df (pd.DataFrame) : dataframe to extract complete entries from.\n drop_dupli (bool) : Whether to retain any duplicate compositions that are found.\n shuffle_seed (int) : Random seed to use to shuffle database entries' orders.\n \n Return\n ms_df (pd.Dataframe): dataframe of complete entries only. \n \"\"\"\n ms_df = df.copy()\n drop_indices = []\n for index,row in df.iterrows():\n ht, ht_code = check_if_valid(row[(\"Precipitation heat treatment\")],allow_all_null=True)\n if ht_code == 2:\n comp, comp_code = check_if_valid(row[(\"Composition\",\"at. %\")])\n if comp_code == 2:\n frac, frac_code = check_if_valid(row[(\"γ’ fraction\",\"at. %\")])\n if frac_code == 2:\n prc, prc_code = check_if_valid(row[(\"γ’ composition\",\"at. %\")])\n if prc_code == 2:\n mtx, mtx_code = check_if_valid(row[(\"γ composition\",\"at. %\")])\n if mtx_code == 2:\n continue\n drop_indices += [index]\n ms_df.drop(drop_indices,inplace=True)\n if drop_dupli:\n ms_df = ms_df.loc[ms_df[(\"Composition\",\"at. %\")].drop_duplicates().index]\n # Shuffle database and select a specified fraction of it:\n ms_df=ms_df.sample(frac=1.,random_state=shuffle_seed).reset_index(drop=True)\n return ms_df\n\ndef get_Xy(df,y_header,drop_els=[],\n min_max=None,drop_na=True,flatten=False,ht=False,log_y=False,\n ht_function = None):\n \"\"\"\n Use in conjunction with get_microstructure to get the X,y data for ML.\n \n Parameters:\n -----------\n df (pd.DataFrame) : Dataframe to process. \n y_header (tuple,str): DataFrame column name for y data to extract. Entered as a tuple for multiindex compatibility.\n If y_header=None, this function just returns all the X data.\n drop_els (list,str) : List of element names to drop from X (composition) data to extract\n min_max (list,float): Min and max cutoff values for databse entries to extract.\n drop_na (bool) : Drop empty rows (in y) from returned database.\n flatten (bool) : Whether to return y as shape (n,1) [FALSE] or (n,) [TRUE].\n ht (bool) : Whether to include heat treatment as well as composition data in X.\n log_y (bool) : Return logarithm of y data.\n ht_function (lambda): Lambda function to apply to heat treatment data if used.\n\n Return:\n -------\n X (ndarray) : X data extracted from DataFrame. \n y (ndarray) : y data extracted from DataFrame. Not returned if y_header=None. 
\n \"\"\"\n\n if len(drop_els)==0: \n drop_els=None\n elif drop_els[0]==\"\": \n drop_els=None\n elif drop_els[-1]==\"\": \n drop_els=drop_els[:-1]\n # Enter header as tuple in case of multiindex\n if y_header:\n # drop rows less/greater than certain min/max values\n if drop_na:\n sub_df = df.dropna(subset=y_header)\n else:\n sub_df = df.copy()\n if min_max:\n min_, max_ = tuple(min_max)\n if isinstance(min_,float): \n condition_0 = (sub_df != False)\n condition = sub_df[y_header].astype(\"float64\") > min_ # Min\n condition_0.update(condition)\n sub_df = sub_df[condition_0].dropna(subset=y_header)\n if isinstance(max_,float): \n condition_0 = (sub_df != False)\n condition = sub_df[y_header].astype(\"float64\") < max_ # Max\n condition_0.update(condition)\n sub_df = sub_df[condition_0].dropna(subset=y_header)\n # Now drop empty rows\n # Start getting data here:\n y = sub_df.loc[:,y_header].astype(\"float64\").values\n if flatten and len(y.shape) > 1 and y.shape[-1] == 1:\n y = y.flatten()\n if log_y:\n y = np.log(y)\n else:\n sub_df = df.copy()\n if drop_els:\n X1 = 0.01*(sub_df.loc[:,(\"Composition\",\"at. %\")].drop(drop_els,axis=1).astype(\"float64\").values)\n else:\n X1 = 0.01*(sub_df.loc[:,(\"Composition\",\"at. %\")].astype(\"float64\").values)\n if ht:\n X0 = sub_df.loc[:,(\"Precipitation heat treatment\")]\n col_order = sorted(X0.columns.tolist(),key = lambda h: h[1])\n X0 = X0[col_order].replace(\"-\",0.0).astype(np.float64).values\n X0[:,:3] += 273.\n if ht_function:\n X0 = ht_function(X0)\n X = np.append(X0,X1,axis=1)\n else:\n X = X1\n if y_header:\n return X,y\n else:\n return X\n\n#%% GPR\nclass customGPR(GaussianProcessRegressor):\n \"\"\"\n Modification of the sklearn parent class that adds explicit max_iter argument.\n \"\"\"\n def __init__(self, \n kernel=None,\n *,\n alpha=1e-10,\n optimizer=\"fmin_l_bfgs_b\",\n n_restarts_optimizer=0,\n normalize_y=False,\n copy_X_train=True,\n random_state=None,\n max_iter=15000):\n super().__init__(kernel,\n alpha=alpha,\n optimizer=optimizer,\n n_restarts_optimizer=n_restarts_optimizer,\n normalize_y=normalize_y,\n copy_X_train=copy_X_train,\n random_state=random_state)\n self.max_iter = max_iter\n \n def _constrained_optimization(self, obj_func, initial_theta, bounds):\n if self.optimizer == \"fmin_l_bfgs_b\":\n opt_res = minimize(\n obj_func,\n initial_theta,\n method=\"L-BFGS-B\",\n jac=True,\n bounds=bounds,\n options={\"maxiter\":self.max_iter}\n )\n _check_optimize_result(\"lbfgs\", opt_res)\n theta_opt, func_min = opt_res.x, opt_res.fun\n elif callable(self.optimizer):\n theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)\n else:\n raise ValueError(f\"Unknown optimizer {self.optimizer}.\")\n\n return theta_opt, func_min\n\n#%% Kernels\n# All of the following are modified from sklearn parent classes.\n\nclass Linear(gp.kernels.DotProduct):\n \"\"\"\n Modification of DotProduct kernel from sklearn. Allows for simple dimensionality\n reduction to account for only part of X being used. 
\n \"\"\"\n def __init__(self,sigma_0=1.0,sigma_0_bounds=(1.e-5,1.e5),\n dims=15,dim_range=None,comp=False):\n super(Linear,self).__init__(sigma_0,sigma_0_bounds)\n self.dims = dims\n self.dim_range = dim_range\n self.comp = comp\n self.constr_trans()\n \n def constr_trans(self):\n A = np.eye(self.dims)\n if self.dim_range: \n A = A[self.dim_range[0]:self.dim_range[1],:]\n if self.comp: \n A = np.r_[[np.append(np.zeros(self.dim_range[0]),np.ones(self.dims-self.dim_range[0]))],A]\n A = A.T # Use transpose since vectors are represented by rows not columns.\n self.A = A\n \n def trans(self,X):\n return X@self.A\n \n def __call__(self, X, Y=None, eval_gradient=False):\n X = np.atleast_2d(X)\n X = self.trans(X)\n if Y is None:\n K = np.inner(X, X) + self.sigma_0 ** 2\n else:\n if eval_gradient:\n raise ValueError(\n \"Gradient can only be evaluated when Y is None.\")\n Y = self.trans(Y)\n K = np.inner(X, Y) + self.sigma_0 ** 2\n\n if eval_gradient:\n if not self.hyperparameter_sigma_0.fixed:\n K_gradient = np.empty((K.shape[0], K.shape[1], 1))\n K_gradient[..., 0] = 2 * self.sigma_0 ** 2\n return K, K_gradient\n else:\n return K, np.empty((X.shape[0], X.shape[0], 0))\n else:\n return K\n \n def diag(self, X):\n X = self.trans(X)\n return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2\n \nclass PCALinear(Linear):\n \"\"\"\n Further modification of sklearn DotProduct kernel. Allows for further dimensionality\n reduction via an explicit PC projection matrix. \n \"\"\"\n def __init__(self,sigma_0=1.0,sigma_0_bounds=(1.e-5,1.e5),\n pca_components=None,use_inv=False,\n dims=15,dim_range=None,comp=False):\n self.pca_components = pca_components\n self.use_inv = use_inv\n super(PCALinear,self).__init__(sigma_0,sigma_0_bounds,dims,dim_range,comp=comp)\n \n def constr_trans(self):\n Ilike = np.eye(self.dims)\n if self.dim_range: \n Ilike = Ilike[self.dim_range[0]:self.dim_range[1],:]\n if self.comp: \n Ilike = np.r_[[np.append(np.zeros(self.dim_range[0]),np.ones(self.dims-self.dim_range[0]))],Ilike] # Subspace projection part of matrix\n self.A = (self.pca_components @ Ilike).T\n \n def trans(self,X):\n if self.use_inv:\n return (X@self.A)**-1\n else:\n return X@self.A\n \nclass L2RBF(gp.kernels.RBF):\n \"\"\"\n Modification of RBF kernel from sklearn. Allows for simple dimensionality\n reduction to account for only part of X being used. 
\n \"\"\"\n def __init__(self,length_scale=1.0,length_scale_bounds=(1.e-5,1.e5),\n dims=15,dim_range=None,comp=False):\n super(L2RBF,self).__init__(length_scale,length_scale_bounds)\n # Matrix used to transform vectors in call.\n self.dims = dims\n self.dim_range = dim_range\n self.comp = comp\n self.constr_trans()\n \n def constr_trans(self):\n A = np.eye(self.dims)\n if self.dim_range: \n A = A[self.dim_range[0]:self.dim_range[1],:]\n if self.comp: \n A = np.r_[[np.append(np.zeros(self.dim_range[0]),np.ones(self.dims-self.dim_range[0]))],A]\n A = A.T # Use transpose since vectors are represented by rows not columns.\n self.A = A\n \n def trans(self,X):\n return X@self.A\n \n def __call__(self, X, Y=None, eval_gradient=False):\n X = np.atleast_2d(X)\n X = self.trans(X)\n length_scale = gp.kernels._check_length_scale(X, self.length_scale)\n if Y is None:\n dists = pdist(X / length_scale, metric='sqeuclidean')\n K = np.exp(-.5 * dists)\n # convert from upper-triangular matrix to square matrix\n K = squareform(K)\n np.fill_diagonal(K, 1)\n else:\n if eval_gradient:\n raise ValueError(\n \"Gradient can only be evaluated when Y is None.\")\n Y = self.trans(Y)\n dists = cdist(X / length_scale, Y / length_scale,\n metric='sqeuclidean')\n K = np.exp(-.5 * dists)\n\n if eval_gradient:\n if self.hyperparameter_length_scale.fixed:\n # Hyperparameter l kept fixed\n return K, np.empty((X.shape[0], X.shape[0], 0))\n elif not self.anisotropic or length_scale.shape[0] == 1:\n K_gradient = \\\n (K * squareform(dists))[:, :, np.newaxis]\n return K, K_gradient\n elif self.anisotropic:\n # We need to recompute the pairwise dimension-wise distances\n K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \\\n / length_scale\n K_gradient *= K[..., np.newaxis]\n return K, K_gradient\n else:\n return K\n\nclass PCAL2RBF(L2RBF):\n \"\"\"\n Further modification of sklearn RBF kernel. Allows for further dimensionality\n reduction via an explicit PC projection matrix. \n \"\"\"\n def __init__(self,length_scale=1.0,length_scale_bounds=(1.e-5,1.e5),\n pca_components=None,\n dims=15,dim_range=None,comp=False):\n self.pca_components = pca_components\n super(PCAL2RBF,self).__init__(length_scale,length_scale_bounds,\n dims,dim_range,comp)\n \n def constr_trans(self):\n Ilike = np.eye(self.dims)\n if self.dim_range: \n Ilike = Ilike[self.dim_range[0]:self.dim_range[1],:]\n if self.comp: \n Ilike = np.r_[[np.append(np.zeros(self.dim_range[0]),np.ones(self.dims-self.dim_range[0]))],Ilike] # Subspace projection part of matrix\n self.A = (self.pca_components @ Ilike).T\n\n#%% SCALER\nclass PartScaler():\n \"\"\"\n Based on sklearn.preprocessing.StandardScaler . Allows for scaling of part of an \n input. \n\n Parameters\n ----------\n scale_range (list,int) : Start end end indices for parts of input that are to be scaled.\n copy_ (bool) : Whether to make a copy of the input when transforming.\n with_mean (bool) : Whether to remove mean value during transformation.\n with_std (bool) : Whether to divide by std during transformation. \n \"\"\"\n def __init__(self, scale_range=None, copy_=True,with_mean=True,with_std=True):\n self.with_mean = with_mean\n self.with_std = with_std\n self.copy_ = copy_\n self.range_ = scale_range\n \n def _reset(self):\n if hasattr(self,'scale_'):\n del self.scale_\n del self.offset_\n \n def fit(self,X):\n \"\"\"\n Fit scaling transformation to some input data.\n\n Parameters\n ----------\n X (ndarray) : data to fit transformation to. 
\n \"\"\"\n self._reset()\n if self.with_mean:\n if self.range_:\n self.offset_ = np.zeros(X.shape[1])\n self.offset_[self.range_[0]:self.range_[1]] = np.mean(X[:,self.range_[0]:self.range_[1]],axis=0)\n else:\n self.offset_ = np.mean(X,axis=0) \n else: \n self.offset_ = 0.0\n \n if self.with_std:\n if self.range_:\n self.scale_ = np.ones(X.shape[1])\n self.scale_[self.range_[0]:self.range_[1]] = np.std(X[:,self.range_[0]:self.range_[1]],axis=0)\n else:\n self.scale_ = np.std(X,axis=0)\n self.scale_ = np.where(self.scale_==0.0,1.0,self.scale_)\n else:\n self.scale_ = 1.0\n return self\n \n def transform(self,X,copy_=None):\n \"\"\"\n Carry out transformation.\n \n Parameters\n ----------\n X (ndarray) : Input data to transform. \n\n Return\n ------\n X (ndarray) : Transformed data. \n \"\"\"\n copy_ = copy_ if copy_ is not None else self.copy_\n if copy_:\n X = X.copy()\n X -= self.offset_\n X /= self.scale_\n return X\n \n def inverse_transform(self,X,copy_=None):\n \"\"\"\n Carry out inverse transformation. \n\n Parameters\n ----------\n X (ndarray) : Transformed data to return to original representation. \n\n Return\n ------\n X (ndarray) : Data in original representation. \n \"\"\"\n copy_ = copy_ if copy_ is not None else self.copy_\n if copy_:\n X = X.copy()\n X *= self.scale_\n X += self.offset_\n return X\n \n def fit_transform(self,X,copy_=None):\n \"\"\"\n Fit and transform data in a single step. \n \"\"\"\n self.fit(X)\n return self.transform(X,copy_)\n\n#%% MODEL CLASSES\n\"\"\"\nREDACTED. See microstructure_gpr.py for these two classes.\nThey were based on an older idea of what the codebase should look like. \n\"\"\"\n\n#%% PLOTTING\n# Use to get colours for different models for each datapt, e.g. for plotting \ndef gen_colours(values):\n \"\"\"\n Use to get colours for different models for each datapoint, e.g. for plotting.\n\n Parameters\n ----------\n values (ndarray) : A code value for each point in a dataset.\n\n Returns\n -------\n colours (ndarray) : Colour index for each point in dataset. \n Will be as many colours as there were unique codes.\n key2col (dict) : Dictionary mapping codes (unique entries in values) to colour indices. \n \"\"\"\n values = np.squeeze(values)\n if len(values.shape) > 1:\n colour_dict = {code.tobytes():i for i,code in enumerate(np.unique(values,axis=0))}\n key2col = lambda code: colour_dict.get(code.tobytes())\n else:\n colour_dict = {code:i for i,code in enumerate(np.unique(values))}\n key2col = lambda code: colour_dict.get(code)\n colours = np.array(list(map(key2col,values)))\n return colours,key2col\n\ndef plot_byModel(f_true,f_pred,f_stds,\n name=\"precipitate fraction\",\n colour_src=None,\n colourmap=\"brg\",\n lims=None,\n label_outliers=None,\n data_labels=None):\n \"\"\"\n Quick function to plot predicted vs. true values. \n\n Parameters\n ----------\n f_true (ndarray) : The true values to plot.\n f_pred (ndarray) : The predicted values to plot.\n f_stds (ndarray) : Uncertainties for each prediciton.\n name (string) : Name of variable being plotted.\n colour_src (ndarray): Optional array of colour codes for each datapoint. \n colourmap (string) : Pyplot colourmap to use.\n lims (list) : Lower and upper limits for plot axes.\n label_outliers (float): If not none, outliers greater than this value will be labelled. 
\n data_labels (ndarray): Labels for each datapoint, will only be used if label_outliers!=None.\n \"\"\"\n fig,axs=plt.subplots()\n plt.errorbar(f_true,f_pred,yerr=f_stds,fmt=\".\",ecolor=\"k\",elinewidth=0.5,zorder=0)\n plt.scatter(f_true,f_pred,marker=\".\",c=colour_src,cmap=colourmap,zorder=10)\n if label_outliers:\n offset=0.05*np.median(f_pred)\n for f_true_i,f_pred_i,f_std_i,alloy_name in zip(f_true,f_pred,f_stds,data_labels):\n f_tol = f_std_i if isinstance(label_outliers,str) else label_outliers\n if np.abs(f_true_i-f_pred_i) > f_tol: \n axs.annotate(alloy_name,(f_true_i+offset,f_pred_i+offset),annotation_clip=False)\n if lims is None:\n lims = [min(axs.get_xlim()+axs.get_ylim()),max(axs.get_xlim()+axs.get_ylim())]\n axs.set_xlim(lims)\n axs.set_ylim(lims)\n axs.plot(lims,lims,\"--k\")\n axs.set_aspect(\"equal\",\"box\")\n axs.set_xlabel(\"Actual \"+name)\n axs.set_ylabel(\"Predicted \"+name)\n return fig,axs\n\n#%% Hume-Rothery transformations.\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DECORATORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Design of decorators stolen from: \n# https://stackoverflow.com/questions/11731136/class-method-decorator-with-self-arguments\ndef inputOutputRule(transformRule):\n @functools.wraps(transformRule)\n def wrapper(*args,**kwargs):\n # First get relevant object instance.\n _self = args[0]\n # Check if special flag has been passed. \n dflag = kwargs.pop(\"dryrun\",False)\n if not _self.Xy_share_labels:\n raise RuntimeError(\"Cannot use transformation rules that assume a relation between input-output labels for datasets without such relations.\")\n else:\n if dflag:\n # Present some fake data and get shape of transformed data\n fake_X = np.c_[[np.random.rand(_self.aux_dims)],np.array([[0.95,.05]])]\n fake_nums = [29,50] \n fake_y = np.ones((1,1))\n out = transformRule(_self,fake_X,fake_nums,fake_y,**kwargs)\n out_dim = 1 if len(out.shape)==1 else out.shape[-1]\n return out_dim\n else:\n return transformRule(*args,**kwargs)\n return wrapper\n\ndef inputRule(transformRule):\n @functools.wraps(transformRule)\n def wrapper(*args,**kwargs):\n # First get relevant object instance.\n _self = args[0]\n # Check if special flag has been passed. 
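\n        # (Added note, illustrative) \"dryrun\" lets HRrep.add_feature size a\n        # feature without real data: the wrapper fabricates one fake row\n        # (auxiliary dims plus a two-element Cu/Sn composition [0.95, 0.05]),\n        # pushes it through the rule and returns the number of output columns\n        # instead of the transformed data.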
\n dflag = kwargs.pop(\"dryrun\",False)\n if _self.Xy_share_labels:\n if dflag:\n # Present some fake data and get shape of transformed data\n fake_X = np.c_[[np.random.rand(_self.aux_dims)],np.array([[0.95,.05]])]\n fake_nums = [29,50] \n fake_y = np.ones((1,1))\n out = transformRule(_self,fake_X,fake_nums,fake_y,**kwargs)\n out_dim = 1 if len(out.shape)==1 else out.shape[-1]\n return out_dim\n else:\n X,at_nums,y = args[1:]\n X,y = _self.extend_original(X,y)\n return transformRule(_self,X,at_nums,y,**kwargs)\n elif _self.multi_output:\n if dflag:\n # Present some fake data and get shape of transformed data\n fake_X = np.c_[[np.random.rand(_self.aux_dims-1)],np.array([[0.95,.05]])]\n fake_nums = [29,50] \n fake_y = np.ones((1,_self.output_cats))\n fake_X,fake_y = _self.multi_2_single_output(fake_X,fake_y)\n out = transformRule(_self,fake_X,fake_nums,fake_y,**kwargs)\n out_dim = 1 if len(out.shape)==1 else out.shape[-1]\n return out_dim\n else:\n X,at_nums,y = args[1:]\n X,y = _self.multi_2_single_output(X,y)\n return transformRule(_self,X,at_nums,y,**kwargs)\n else:\n if dflag:\n fake_X = np.c_[[np.random.rand(_self.aux_dims)],np.array([[0.95,.05]])]\n fake_nums = [29,50] \n fake_y = np.ones((1,1))\n out = transformRule(_self,fake_X,fake_nums,fake_y,**kwargs)\n out_dim = 1 if len(out.shape)==1 else out.shape[-1]\n return out_dim\n else:\n return transformRule(*args,**kwargs)\n return wrapper\n\nclass HRrep_parent():\n \"\"\"\n Parent class that does NOT implement the transform(X[,y]) method!!!\n\n\n This class carries out the transformation into the Hume-Rothery basis.\n It has been written to work with sklearn.pipeline.Pipeline\n \"\"\"\n def __init__(self,*features,\n Xy_share_labels=False,\n aux_dims=None,\n multi_output=False,\n rdf_rep_params={}):\n \"\"\"\n Parameters\n ----------\n Xy_share_labels (bool) : Whether or not inputs and outputs are linked due to common labelling e.g. input = composition, output = partitioning coefficients\n aux_dims (int) : Number of dims at the START of the input that do not correspond to composition.\n multi_output (int) : Number of target columns, optional. 
Can't be used with Xy_share_labels\n\n Args\n ----\n features (strings) : strings correpsonding to the names of desired features in the representation.\n \"\"\"\n self.Xy_share_labels=Xy_share_labels\n self.aux_dims = aux_dims if aux_dims is not None else 0\n self.pt = np.genfromtxt(\"hr_table.csv\",delimiter=\",\",\n missing_values=(\"nan\",),filling_values=(np.nan,),\n skip_header=1,usecols=(2,3,4,5,8,9,10,11,12,13,14,\n 15,16,17,18,19,20,21,22,23,24,25,\n 26,27,28,29,30,31,32,\n 34,35))\n # Atomic properties\n self.cr = self.pt.T[0] # Covalent radius\n self.en = self.pt.T[2] # Electronegativities\n self.sg = self.pt.T[3].astype(\"int\") # Structure groups\n self.pf = self.pt.T[4] # Atomic packing factor\n self.wf = self.pt.T[5] # Work functions\n self.va = self.pt.T[6] # Valence (non-core electrons)\n self.mp = self.pt.T[29]# Modified pettifor scale\n self.am = self.pt.T[30]# Atomic mass\n self.orbitals = dict(zip([\"s\",\"p\",\"d\",\"f\"],self.pt.T[7:11]))\n # Pseudopotential radii\n self.pp_radii = dict(zip([\"1s\",\"2s\",\"2p\",\"3s\",\"3p\",\"3d\",\"4s\",\"4p\",\"4d\",\"5s\",\"5p\",\n \"5d\",\"5f\",\"6s\",\"6p\",\"6d\",\"7s\",\"4f\"],\n self.pt.T[11:29])) # girth is the only relevant feature \n s = [self.pp_radii[\"1s\"],self.pp_radii[\"2s\"],self.pp_radii[\"3s\"],self.pp_radii[\"4s\"],self.pp_radii[\"5s\"],self.pp_radii[\"6s\"],self.pp_radii[\"7s\"]]\n p = [self.pp_radii[\"2p\"],self.pp_radii[\"3p\"],self.pp_radii[\"4p\"],self.pp_radii[\"5p\"],self.pp_radii[\"6p\"]]\n d = [self.pp_radii[\"3d\"],self.pp_radii[\"4d\"],self.pp_radii[\"5d\"],self.pp_radii[\"6d\"]]\n f = [self.pp_radii[\"4f\"],self.pp_radii[\"5f\"]]\n self.pp_radii[\"s\"] = np.sum(s,axis=1)/np.count_nonzero(s,axis=1)\n self.pp_radii[\"p\"] = np.sum(p,axis=1)/np.count_nonzero(p,axis=1)\n self.pp_radii[\"d\"] = np.sum(d,axis=1)/np.count_nonzero(d,axis=1)\n self.pp_radii[\"f\"] = np.sum(f,axis=1)/np.count_nonzero(f,axis=1)\n # Elements used in fit.\n self.els_in_fit = None\n # Features used in representation. \n self.ft_names = features \n self.m_out = len(features)\n # create a basic rdf class instance.\n self.rdf_0 = self.RDF(self,**rdf_rep_params)\n # Multi-output stuff\n if isinstance(multi_output,bool) or multi_output==1:\n self.multi_output = multi_output\n self.output_cats = 0\n else:\n if self.Xy_share_labels:\n raise RuntimeError(\"Cannot have Xy_share_labels=True and multi_output > 1.\")\n self.multi_output = True\n self.output_cats = multi_output\n self.aux_dims += 1\n \n @staticmethod\n def get_els(dataframe,rtn_numbers=True):\n \"\"\"\n Work out which elements are in the dataframe.\n\n Parameters\n ----------\n dataframe (pd.Dataframe): Input dataframe\n rtn_numbers (bool) : Whether to return atomic numbers or element names.\n \"\"\"\n nlevels = dataframe.columns.nlevels\n if nlevels==3:\n els = dataframe.columns[\n dataframe.columns.get_locs((\"Composition\",\"at. %\"))].get_level_values(2).to_numpy()\n elif nlevels==2:\n els = dataframe.columns[\n dataframe.columns.get_locs((\"Composition\",))].get_level_values(1).to_numpy()\n elif nlevels==1:\n els = dataframe.columns.to_numpy()\n nums = [data.atomic_numbers.get(el) for el in els]\n if rtn_numbers:\n return nums\n else:\n return els\n \n #~~~~~~~~~~~~~~~~~~~~~~~~~ TRANSFORMER METHODS ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n def fit(self,X,y):\n \"\"\"\n Doesn't do anything except work out which elements were included in \n the initial fit, and maybe work out number of output categories. 
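\n\n        Example (illustrative): for a dataframe whose composition columns are\n        Ni, Cr and Al, fitting records\n            >>> self.els_in_fit   # array(['Ni', 'Cr', 'Al'], dtype=object)\n        and get_els would map them to atomic numbers [28, 24, 13] via\n        ase.data.atomic_numbers.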
\n \"\"\"\n self.els_in_fit = self.get_els(X,rtn_numbers=False)\n if self.multi_output:\n self.output_cats = self.multi_2_single_output(X.values,y.values,True)\n return self\n\n def transform(self,X,y):\n \"\"\"\n Raises a runtime error: this method is purposefully not implemented in \n this class. Use child class HRrep instead. \n \"\"\"\n raise RuntimeError(\"Use HRrep class instead\")\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RESHAPE METHODS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n def extend_original(self,X,y=None):\n \"\"\"\n Maps the original representation from (N,m) array to (~N*m,m) array.\n \"\"\"\n N,m = X.shape\n m -= self.aux_dims\n mask = self._gen_mask(X)\n if y is None:\n return np.repeat(X,m,axis=0)[mask],None\n else:\n if isinstance(y,tuple):\n return tuple([np.repeat(X,m,axis=0)[mask]]\\\n +[y_.flatten()[mask] for y_ in y])\n else:\n return np.repeat(X,m,axis=0)[mask],y.flatten()[mask]\n\n def multi_2_single_output(self,X,y,get_cat_num_only=False):\n \"\"\"\n Similar to extend_original, except this works explicitly for any type of multi-output and generates a new\n label column.\n \"\"\"\n self.multi_output = True\n if y is not None:\n N,m = y.shape\n mask = ~(np.any([np.isnan(y.flatten()),np.isinf(y.flatten())\n ],0))\n else:\n N = X.shape[0]\n m = self.output_cats\n mask = np.ones(N*m).astype(bool)\n if get_cat_num_only:\n return m\n if y is not None:\n return np.c_[np.arange(N*m)%m,np.repeat(X,m,axis=0)][mask],y.flatten()[mask]\n else:\n return np.c_[np.arange(N*m)%m,np.repeat(X,m,axis=0)][mask],y\n\n def revert_shape(self,X_orig,y_out):\n \"\"\"\n Take an array with entries corresponding to non-zeros in original\n data (e.g. from ML predictions) and reshape to match original shape.\n \"\"\"\n if self.Xy_share_labels:\n N,m = X_orig.shape\n m -= self.aux_dims\n mask = self._gen_mask(X_orig.values)\n y_rev = np.zeros(N*m)\n y_rev[np.where(mask)] = y_out\n y_df = X_orig.copy()\n y_df = y_df.iloc[:,self.aux_dims:]\n if y_df.columns.nlevels > 1:\n y_df.columns = y_df.columns.droplevel(list(range(y_df.columns.nlevels-1)))\n y_df.loc[:] = y_rev.reshape((N,m))\n return y_df\n else:\n return y_out\n\n def reshape_cov2sub(self,X_orig,cov,y=None):\n \"\"\"\n Reshape the covariance array for predictions into a list of sub-covariance\n matrices, each corresponding to the covariance of predictions for a single\n given entry. Note this returns a ragged list of matrices, i.e. matrices\n do NOT contain entries for elements not present in input. \n\n Parameters\n ----------\n X_orig (pd.Dataframe) : The original input data. Used to get zero entries. \n cov (ndarray) : Covariance matrix to reshape. Provide as tuples to get joined arrays as outputs.\n y (ndarray) : Optional. 
Provide predictions and reshape these in the same way too.\n \"\"\"\n if self.Xy_share_labels:\n tuple_flag = True if isinstance(cov,tuple) else False\n at_nums = self.get_els(X_orig)\n m = len(at_nums)\n mask = self._gen_mask(X_orig.values)\n locs = mask.reshape(-1,m) # Locations of non-zero components\n sub_cov = [] # sub-covariance matrix list\n if y is not None:\n sub_y = []\n start_ind = 0\n for entry in locs:\n end_ind = start_ind + entry.sum()\n if tuple_flag:\n sub_covs = (cov_[start_ind:end_ind,start_ind:end_ind] for cov_ in cov)\n sub_cov += [block_diag(*sub_covs)]\n else:\n sub_cov += [cov[start_ind:end_ind,start_ind:end_ind]]\n if y is not None:\n if tuple_flag:\n sub_ys = tuple(y_[start_ind:end_ind] for y_ in y)\n sub_y += [np.concatenate(sub_ys)]\n else:\n sub_y += [y[start_ind:end_ind]]\n start_ind = copy(end_ind)\n if y is not None:\n return sub_cov,sub_y\n else: \n return sub_cov\n else:\n return\n\n def _gen_mask(self,X):\n \"\"\"\n Generates a mask to apply to remove entries corresponding to component with zero composition.\n \"\"\"\n X_ = self._c_dims(X).flatten()\n return ~(X_==0.)\n\n def _c_dims(self,X):\n \"\"\"\n Return the dimensions of the input corresponding to composition.\n \"\"\"\n return X[:,self.aux_dims:]\n\n def _a_dims(self,X,dims2use=\"all\"):\n \"\"\"\n Return the dimensions of the input correponding to auxiliary componenents (i.e. not composition).\n \"\"\"\n if dims2use==\"all\":\n return X[:,int(self.multi_output):self.aux_dims]\n else:\n return X[:,np.array(dims2use)+int(self.multi_output)]\n \n\n #~~~~~~~~~~~~~~~~~~~~~~~~ TRANSFORMATION RULES ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # _mu suffix -> chemical potential-type rule\n # _g suffix -> free energy-type rule\n # _struc suffix -> structure-type rule\n # _atom -> atom species info rule.\n\n # _tr_ prefix is required for child class to auto-find all relevant transformation rules\n \n # kwargs should be used to implement adiditonal parameters instead of args. \n # dryrun should NOT be used as a kwarg - it is popped out by the decorator. 
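\n    # (Added note, illustrative) The rules below share two output shapes:\n    # _mu/_atom rules build a per-element vector m of length n_els, broadcast\n    # it over the N alloys, flatten row-major (entry k*n_els + j -> alloy k,\n    # element j) and keep only the non-zero-composition positions selected by\n    # _gen_mask; _g rules instead contract the same per-element vectors with\n    # the composition matrix (e.g. X @ m), giving one value per alloy.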
\n\n # Chemical potential-like transformations\n @inputOutputRule\n def _tr_mix_mu(self,X,at_nums,y=None):\n \"\"\"\n New rule: entropy of mixing of the alloy.\n \"\"\"\n X = self._c_dims(X)\n S = np.ma.log(X).flatten()\n return S[~S.mask].data\n\n @inputOutputRule\n def _tr_mis_mu(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Atomic size misfit of the alloy.\n \"\"\"\n # Extra parameter\n method = kwargs.get(\"method\",\"hooke\")\n\n m = self.cr[at_nums]\n mask = self._gen_mask(X)\n X = self._c_dims(X)\n if method==\"hooke\":\n out = (np.repeat(m.reshape(-1,1),X.shape[0],axis=1)-X@m).T.flatten()\n elif method==\"enthalpy\":\n out = (-(np.repeat(m.reshape(-1,1),X.shape[0],axis=1))**2+(X@m)**2).T.flatten()\n return out[mask]\n \n @inputOutputRule\n def _tr_val_mu(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Mean valence of the alloy.\n \"\"\"\n # Extra parameter\n method = kwargs.get(\"method\",\"valence\")\n\n N,n = X.shape\n m = self.va[at_nums]\n mask = self._gen_mask(X)\n X = self._c_dims(X)\n if method==\"valence\":\n out = np.repeat(m.reshape(-1,1),N,axis=1).T.flatten()\n return out[mask]\n elif method==\"sommerfeld\":\n pass\n \n @inputOutputRule\n def _tr_eng_mu(self,X,at_nums,y=None):\n \"\"\"\n Electronegativities of the alloy.\n \"\"\"\n m = self.en[at_nums]\n mask = self._gen_mask(X)\n X = self._c_dims(X)\n M = (m.reshape(-1,1)-(m.T))**2\n out = np.einsum(\"kj,ij->ki\",X,M).flatten()\n return out[mask]\n\n # Free energy-like transformations\n @inputRule\n def _tr_mix_g(self,X,at_nums,y=None):\n \"\"\"\n New rule: entropy of mixing of the alloy.\n \"\"\"\n X = self._c_dims(X)\n S = (X*np.ma.log(X).filled(0.)).sum(axis=1)\n return S\n\n @inputRule\n def _tr_mis_g(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Atomic size misfit of the alloy.\n \"\"\"\n # Extra parameter\n method = kwargs.get(\"method\",\"hooke\")\n\n X = self._c_dims(X)\n m = self.cr[at_nums]\n if method==\"hooke\":\n out = (X*(np.repeat(m.reshape(-1,1),X.shape[0],axis=1)-X@m).T**2).sum(axis=1)\n elif method==\"enthalpy\":\n out = (X*(-(np.repeat(m.reshape(-1,1),X.shape[0],axis=1))**3+(X@m)**3).T).sum(axis=1)\n return out\n\n @inputRule\n def _tr_val_g(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Mean valence of the alloy.\n \"\"\"\n # Extra parameter\n method = kwargs.get(\"method\",\"valence\")\n\n X = self._c_dims(X)\n m = self.va[at_nums]\n if method==\"valence\":\n return X@m\n elif method==\"sommerfeld\":\n pass\n\n @inputRule\n def _tr_eng_g(self,X,at_nums,y=None):\n \"\"\"\n Electronegativities of the alloy.\n \"\"\"\n X = self._c_dims(X)\n m = self.en[at_nums]\n M = (m.reshape(-1,1)-(m.T))**2\n return np.einsum(\"kj,ki,ij->k\",X,X,M)\n \n @inputRule\n def _tr_lel_g(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Mean number of l orbital electrons.\n \"\"\"\n l = kwargs.get(\"l\",\"s\")\n m = self.orbitals.get(l)[at_nums]\n X = self._c_dims(X)\n return X@m\n\n @inputRule\n def _tr_lpp_m(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Mean radius for the nl-level pseduopotentials in this alloy.\n \"\"\"\n nl = kwargs.get(\"nl\",\"1s\")\n m = self.pp_radii.get(nl)[at_nums]\n non_zero = 1.*(m > 0.)\n X = self._c_dims(X)\n # mean radii \n with np.errstate(divide=\"ignore\",invalid=\"ignore\"):\n r = (X@m)/(X@non_zero)\n r[r == np.inf] = 0.\n r = np.nan_to_num(r)\n return r\n\n @inputRule\n def _tr_lel_bins(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Bin the at. % into (user-specified) l-orbital electron bins. 
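\n\n        Example (illustrative): with l=\"d\" and bin_ledges=[2.0, 6.0] each\n        alloy yields two features, the summed at. fraction of species with\n        2 <= n_d < 6 and the fraction with n_d >= 6.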
\n \"\"\"\n bin_ledges = copy(kwargs.get(\"bin_ledges\",[2.0]))\n l = kwargs.get(\"l\",\"s\")\n n_redges = len(bin_ledges)\n bin_ledges += [np.inf]\n X = self._c_dims(X)\n m = self.orbitals.get(l)[at_nums]\n mat = np.zeros((n_redges,len(m)))\n for i,(ledge,redge) in enumerate(zip(bin_ledges[:-1],bin_ledges[1:])):\n mat[i] = (m >= ledge) * (m < redge)\n return X@mat.T\n\n @inputRule\n def _tr_lpp_bins(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Bin the at. % into (user-specified) nl-level psudopot radius bins.\n \"\"\"\n bin_ledges = copy(kwargs.get(\"bin_ledges\",[0.0,1.0,2.0]))\n nl = kwargs.get(\"nl\",\"1s\")\n n_redges = len(bin_ledges)\n bin_ledges += [np.inf]\n X = self._c_dims(X)\n m = self.pp_radii.get(nl)[at_nums]\n mat = np.zeros((n_redges,len(m)))\n for i,(ledge,redge) in enumerate(zip(bin_ledges[:-1],bin_ledges[1:])):\n mat[i] = (m > ledge) * (m <= redge)\n return X@mat.T\n\n @inputOutputRule\n def _tr_lel_exclb(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Bin the at. % into (user-specified) l orbital electron bins. Excludes the \n element corresponding to this entry. \n \"\"\"\n bin_ledges = copy(kwargs.get(\"bin_ledges\",[2.0]))\n l = kwargs.get(\"l\",\"s\")\n n_redges = len(bin_ledges)\n bin_ledges += [np.inf]\n m = self.orbitals.get(l)[at_nums]\n mat = np.zeros((n_redges,len(m)))\n for i,(ledge,redge) in enumerate(zip(bin_ledges[:-1],bin_ledges[1:])):\n mat[i] = (m >= ledge) * (m < redge)\n # Extend X to the correct shape, set to zero for corresponding elements\n mask = self._gen_mask(X)\n X = self._c_dims(X)\n N,n = X.shape\n X_ext = np.repeat(X,n,axis=0)\n d1_ind,d2_ind = np.diag_indices(n)\n d2_ind = d2_ind.repeat(N)\n d1_ind = ((n*np.arange(N).reshape(-1,1)).repeat(n,axis=1)+d1_ind).T.flatten()\n X_ext[(d1_ind,d2_ind)] = 0.\n X_ext = X_ext[mask]\n return X_ext@mat.T \n\n # atomic species based transformations.\n @inputOutputRule\n def _tr_amu_atom(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Atomic mass of each species\n \"\"\"\n N,n = X.shape\n m = self.am[at_nums]\n mask = self._gen_mask(X)\n out = np.repeat(m.reshape(-1,1),N,axis=1).T.flatten()\n return out[mask]\n\n @inputOutputRule\n def _tr_mps_atom(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Modified pettifor scale number for each species. \n \"\"\"\n N,n = X.shape\n m = self.mp[at_nums]\n mask = self._gen_mask(X)\n out = np.repeat(m.reshape(-1,1),N,axis=1).T.flatten()\n return out[mask]\n\n @inputOutputRule\n def _tr_atn_atom(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Atomic number for each species. \n \"\"\"\n N,n = X.shape\n mask = self._gen_mask(X)\n out = np.repeat(at_nums.reshape(-1,1),N,axis=1).T.flatten()\n return out[mask]\n\n @inputOutputRule\n def _tr_lel_atom(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n # l orbital electrons for each species. \n \"\"\"\n l = kwargs.get(\"l\",\"s\")\n N,n = X.shape\n m = self.orbitals.get(l)[at_nums]\n mask = self._gen_mask(X)\n X = self._c_dims(X)\n out = np.repeat(m.reshape(-1,1),N,axis=1).T.flatten()\n return out[mask]\n\n @inputOutputRule\n def _tr_lpp_atom(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n # nl-level pseudopot radius for each species. \n \"\"\"\n nl = kwargs.get(\"nl\",\"1s\")\n N,n = X.shape\n m = self.pp_radii.get(nl)[at_nums]\n mask = self._gen_mask(X)\n X = self._c_dims(X)\n out = np.repeat(m.reshape(-1,1),N,axis=1).T.flatten()\n return out[mask]\n\n @inputOutputRule\n def _tr_lex_atom(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Excess l orbital electrons for each element vs. 
alloy mean.\n \"\"\"\n l = kwargs.get(\"l\",\"s\")\n m = self.orbitals.get(l)[at_nums]\n mask = self._gen_mask(X)\n X = self._c_dims(X)\n out = (np.repeat(m.reshape(-1,1),X.shape[0],axis=1)-X@m).T.flatten()\n return out[mask]\n\n @inputOutputRule\n def _tr_lpp_dif(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Misfit for this atom's nl-level pseudopot radius vs. mean.\n \"\"\"\n nl = kwargs.get(\"nl\",\"1s\")\n m = self.pp_radii.get(nl)[at_nums]\n non_zero = 1.*(m > 0.)\n mask = self._gen_mask(X)\n X = self._c_dims(X)\n # mean radii \n with np.errstate(divide=\"ignore\",invalid=\"ignore\"):\n r = (X@m)/(X@non_zero)\n r[r == np.inf] = 0.\n r = np.nan_to_num(r)\n out = (non_zero*\\\n (np.repeat(m.reshape(-1,1),X.shape[0],axis=1)-\\\n r).T).flatten()\n return out[mask]\n\n @inputRule\n def _tr_lpp_mis(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Mean squared nl-level pseudopotential radius misfit of the alloy.\n \"\"\"\n nl = kwargs.get(\"nl\",\"1s\")\n m = self.pp_radii.get(nl)[at_nums]\n non_zero = 1.*(m > 0.)\n X = self._c_dims(X)\n # mean radii \n with np.errstate(divide=\"ignore\",invalid=\"ignore\"):\n r = (X@m)/(X@non_zero)\n r[r == np.inf] = 0.\n r = np.nan_to_num(r)\n out = (X*non_zero*(np.repeat(m.reshape(-1,1),X.shape[0],axis=1)-\\\n r).T**2).sum(axis=1)\n return out\n\n @inputOutputRule\n def _tr_cmp_atom(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Compare rdf for each element to the averaged alloy rdfs. \n \"\"\"\n # Extra parameter\n crystal = kwargs.get(\"crystal\",\"fcc\")\n return np.linalg.norm(\n self._tr_elm_struc(X,at_nums,y)-self._tr_aly_struc(X,at_nums,y,crystal=crystal),\n axis=1)\n\n # Structure-based transformations\n @inputOutputRule\n def _tr_elm_struc(self,X,at_nums,y=None):\n \"\"\"\n Find rdf for each element in the alloys.\n \"\"\"\n n_els = len(at_nums)\n mask = self._gen_mask(X)\n X = self._c_dims(X)\n N,n = X.shape\n out = np.zeros((N*n,self.rdf_0.r_coarse.shape[0])) \n for a,el in enumerate(np.array(data.chemical_symbols)[at_nums]):\n out[a::n_els,:] = self.rdf_0(el,at_nums)\n return out[mask]\n\n @inputRule\n def _tr_aly_struc(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Find an averaged rdf for each alloy. \n \"\"\"\n # Extra parameter\n crystal = kwargs.get(\"crystal\",\"fcc\")\n X = self._c_dims(X)\n return self.rdf_0(X,at_nums,crystal=crystal)\n\n @inputRule\n def _tr_lambda(self,X,at_nums,y=None,pd_input=True,apply2comp=True):\n \"\"\"\n Add a lambda function-type transformation. \n Uses functions from a list attribute of the class. \n \"\"\"\n if pd_input:\n X = pd.DataFrame(data=X,\n columns=[data.chemical_symbols[at_n] for at_n in at_nums],\n index=np.arange(X.shape[0]))\n if apply2comp:\n X = self._c_dims(X)\n else:\n X = self._a_dims(X)\n out = self.lambda_methods[0](X)\n if len(out.shape) > 1:\n out = out.reshape(-1)\n return out\n\n # auxiliary transformation rules\n @inputRule\n def _tr_ht_aux(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Transforms the heat treatments into a format with correct exchange properties.\n Assumes a temperature,time, temperature,time, ... 
format\n \"\"\"\n use_aux_dims = kwargs.get(\"use_aux_dims\",\"all\")\n in_celsius = kwargs.get(\"in_celsius\",True)\n num_ht = (self.aux_dims-int(self.multi_output))//2 if use_aux_dims==\"all\" else len(use_aux_dims)//2\n X = self._a_dims(X,use_aux_dims)\n T = X[:,::2]\n if in_celsius:\n T += 273.\n t = X[:,1::2]\n rep0 = (T*t).sum(axis=1,keepdims=True)\n out = np.c_[T,rep0]\n for i in range(1,num_ht):\n repi = rep0 + ((T[:,i-1]-T[:,i])*t[:,i]).reshape(-1,1)\n out = np.c_[out,repi]\n return out\n\n @inputRule\n def _tr_1hot(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Only for use with multi-output models. Transforms the multi-output categoricals features to a one-hot\n represented feature.\n \"\"\"\n N,_ = X.shape\n m = self.output_cats\n X_cat = X[:,0].astype(int) # First column will always be used for categoricals\n out = np.zeros((N,m))\n out[np.arange(N),X_cat] = 1.\n return out\n\n @inputRule\n def _tr_id(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Identity transformation. Only really meant for use with auxiliary features. \n \"\"\"\n use_aux_dims = kwargs.get(\"use_aux_dims\",\"all\")\n use_comp = kwargs.get(\"composition\",True)\n if use_aux_dims is not None:\n X_out = self._a_dims(X,use_aux_dims)\n if use_comp:\n X_out = np.c_[X_out,self._c_dims(X)]\n else:\n X_out = self._c_dims(X)\n return X_out\n\n @inputRule\n def _tr_lmp_aux(self,X,at_nums,y=None,**kwargs):\n \"\"\"\n Transforms test conditions (temp., stress) into more LMP-friendly features. \n \"\"\"\n use_aux_dims = kwargs.get(\"use_aux_dims\",\"all\")\n in_celsius = kwargs.get(\"in_celsius\",True)\n X = self._a_dims(X,use_aux_dims)\n T = X[:,0:1]\n if in_celsius:\n T += 273.\n s = X[:,1:2]\n return np.c_[T**-1,s]\n\n\n class RDF():\n # Pre-calc Thomas-Fermi wavevector for shielding\n k_per_n = np.sqrt(4*(3/np.pi)**(1/3)/0.5291772)\n\n \"\"\"\n Generate the representation of the crystal structure\n of a given alloy.\n \"\"\"\n def __init__(self,outer,\n dr=0.5,cutoff=6.0,\n smear_factor=0.1,\n use_shielding=True,\n use_valence=\"non-core\",\n coarse=True):\n self.outer = outer\n self.dr = dr\n self.rc = cutoff\n self.coarse = coarse\n self.outsize=1+int((self.rc-1.)//self.dr)\n self.r_coarse = np.linspace(1.,self.rc,self.outsize)\n self.r = np.linspace(1.,self.rc,self.outsize*100) if self.coarse else self.r_coarse.copy()\n self.smear_f = smear_factor\n self.shield = use_shielding\n self.use_v = use_valence\n # Pre-calc a few common shell numbers/radii\n fcc = bulk(\"X\",\"fcc\",1.)\n R_fcc,N_fcc = shell_radii(fcc,self.rc/2.)\n bcc = bulk(\"X\",\"bcc\",1.)\n R_bcc,N_bcc = shell_radii(bcc,self.rc/2.)\n hcp = bulk(\"X\",\"hcp\",1.,1.633)\n R_hcp,N_hcp = shell_radii(hcp,self.rc/1.5)\n self.R_fcc = R_fcc ; self.N_fcc = N_fcc\n self.R_bcc = R_bcc ; self.N_bcc = N_bcc\n self.R_hcp = R_hcp ; self.N_hcp = N_hcp\n # Densities\n self.n_fcc = fcc.get_global_number_of_atoms()/fcc.get_volume()\n self.n_bcc = bcc.get_global_number_of_atoms()/bcc.get_volume()\n self.n_hcp = hcp.get_global_number_of_atoms()/hcp.get_volume()\n\n def __call__(self,X,at_nums,crystal=\"fcc\"):\n \"\"\"\n Calculate the representation of an alloy X.\n\n Parameters\n ----------\n X (ndarray) : ndarray representing the alloy. 
\n Alternatively use s str representing an element.\n crystal (str): Type of crystal.\n \"\"\"\n X_vec = 1.*(np.array(at_nums)==data.atomic_numbers.get(X)) if isinstance(X,str) else X.copy()\n X_vec = np.atleast_2d(X_vec)\n mean_v = X_vec@self.outer.va[at_nums] if self.use_v else 1.\n mean_r = X_vec@self.outer.cr[at_nums]\n # Check what crystal we're using.\n if isinstance(X,str):\n struc = bulk(X)\n R,N = shell_radii(struc,self.rc)\n R = R.reshape(1,-1) ; N = N.reshape(1,-1)\n n = mean_v*struc.get_global_number_of_atoms()/struc.get_volume()\n elif crystal==\"fcc\":\n mean_a = mean_r*4/np.sqrt(2)\n R = self.R_fcc.reshape(1,-1)*mean_a.reshape(-1,1)\n N = np.repeat(self.N_fcc.reshape(-1,1),mean_a.shape[0],axis=1).T\n n = mean_v*self.n_fcc/mean_a**3\n elif crystal==\"hcp\":\n mean_a = mean_r*2\n R = self.R_hcp.reshape(1,-1)*mean_a.reshape(-1,1)\n N = np.repeat(self.N_hcp.reshape(-1,1),mean_a.shape[0],axis=1).T\n n = mean_v*self.n_hcp/mean_a**3\n elif crystal==\"bcc\":\n mean_a = mean_r*4/np.sqrt(3)\n R = self.R_bcc.reshape(1,-1)*mean_a.reshape(-1,1)\n N = np.repeat(self.N_bcc.reshape(-1,1),mean_a.shape[0],axis=1).T\n n = mean_v*self.n_bcc/mean_a**3\n else:\n # Needs to be modified to work properly\n mean_a = 2.*mean_r # <- Probably wrong\n struc = bulk(\"X\",crystal,mean_a)\n R,N = shell_radii(struc,self.rc)\n # density of electrons\n n = mean_v*struc.get_global_number_of_atoms()/struc.get_volume()\n # Find Thomas-Fermi wavenumber for shielding\n k_s = self.k_per_n*n**(1/6) if self.shield else np.zeros_like(mean_v)\n # Calculation of rep starts here.\n r = self.r\n rdf = np.einsum(\"ij,ijk->ijk\",\n (4*np.pi*R)**-2*N*mean_v.reshape(-1,1)\\\n /(self.smear_f*mean_r.reshape(-1,1)*np.sqrt(2*np.pi)),\n np.exp(-0.5*((np.atleast_2d(r)-np.atleast_3d(R))\\\n /(self.smear_f*mean_r).reshape(-1,1,1))**2)).sum(axis=1)\\\n * np.exp(-k_s.reshape(-1,1)*r.reshape(1,-1))\n if self.coarse:\n rdf = rdf.reshape(rdf.shape[0],-1,100).mean(axis=2)\n return rdf\n\ndef str2method_kwargs(ft_string):\n \"\"\"\n Parse an input string representing a HR transformation function.\n \"\"\"\n parts = ft_string.split(\"|\")\n method = \"_tr_\"+parts[0]\n kwgs_l = parts[1:]\n kwargs = {}\n for k in kwgs_l:\n kw,v = k.split(\"=\")\n kwargs[kw] = ast.literal_eval(v)\n return method,kwargs\n\n\nclass HRrep(HRrep_parent):\n \"\"\"\n This class carries out the transformation into the Hume-Rothery basis.\n It has been written to work with sklearn.pipeline.Pipeline.\n Feature names can be suffixed with kwargs by using commas \"|\" e.g. the \n string \"mis_mu|method=enthalpy\" will add the mis_mu method as a transform \n with the kwarg method=enthalpy always being passed to it. \n\n The currently implemented features are:\n mix_mu Chem. pot.-like term for free energy of mixing. \n mis_mu Chem. pot.-like term for atomic size misfit energy.\n val_mu Chem. pot.-like term for electronic free energy (=valence).\n eng_mu Chem. 
pot.-like term for enthalpy of mixing (uses electroneg).\n mix_g Free energy of mixing.\n mis_g Atomic size misfit energy.\n val_g Electronic free energy (= mean valence).\n eng_g Enthalpy of mixing using electronegativites.\n sel_atom # s electrons.\n pel_atom # p electrons.\n del_atom # d electrons.\n fel_atom # f electrons.\n cmp_atom 2-norm similairty of atom-species RDF to (averaged) alloy RDF.\n elm_struc Atom-species RDF (ndarray).\n aly_struc Averaged RDF for the alloy (ndarray).\n \"\"\"\n def __init__(self,*features,\n Xy_share_labels=False,\n aux_dims=None,\n multi_output=False,\n rdf_rep_params={}):\n super().__init__(*features,\n Xy_share_labels=Xy_share_labels,\n aux_dims=aux_dims,\n multi_output=multi_output,\n rdf_rep_params=rdf_rep_params)\n self._get_feature_methods()\n\n def add_feature(self,new_features):\n \"\"\"\n Add new features to the class. Added to END of feature list. \n \n Parameters\n ----------\n new_features (list, str) : A list of new features to add. See __init__ for details.\n \"\"\"\n lambda_methods = []\n ft_methods = []\n ft_kwargs = []\n ft_list = []\n groups = []\n n_found = 0\n m_found = 0\n all_methods = dir(HRrep_parent)\n for ft in new_features:\n if isinstance(ft,str):\n gp_start = copy(m_found)\n method,kwargs = str2method_kwargs(ft)\n if method in all_methods:\n n_found += 1\n this_method = getattr(HRrep,method)\n ft_methods += [this_method]\n ft_kwargs += [kwargs]\n # Calling functions with dryrun kwarg returns output shape\n m = this_method(self,dryrun=True,**kwargs)\n m_found += m\n ft_list += [\"[{}]__{}\".format(ft,i) for i in range(m)]\n gp_end = copy(m_found)\n # Also valid to supply feature names pre-grouped using tuples/lists\n elif hasattr(ft,\"__len__\"):\n ft_group = copy(ft)\n gp_start = copy(m_found)\n for ft in ft_group:\n method,kwargs = str2method_kwargs(ft)\n if method in all_methods:\n n_found += 1\n this_method = getattr(HRrep,method)\n ft_methods += [this_method]\n ft_kwargs += [kwargs]\n # Calling functions with dryrun kwarg returns output shape\n m = this_method(self,dryrun=True,**kwargs)\n m_found += m\n ft_list += [\"[{}]__{}\".format(ft,i) for i in range(m)]\n gp_end = copy(m_found)\n elif callable(ft):\n gp_start = copy(m_found)\n method = \"_tr_lambda\"\n n_found += 1\n this_method = getattr(HRrep,method)\n ft_methods += [this_method]\n ft_kwargs += [{}] # Can't use kwargs with lambda method (for now)\n lambda_methods += [ft] # Only calls first one for now.\n m = this_method(self,dryrun=True)\n m_found += m\n ft_list += [\"[{}]__{}\".format(\"lambda\",i) for i in range(m)]\n gp_end = copy(m_found)\n else:\n warnings.warn(\"Method {} not found.\".format(ft),UserWarning)\n continue\n if gp_start!=gp_end: # Have found at least one valid feature\n groups += [[gp_start,gp_end]]\n self.nft_out += n_found\n self.dim_out += m_found\n self.features += ft_methods\n self.ft_kwargs += ft_kwargs\n self.ft_list += ft_list\n self.groups += groups\n self.lambda_methods += lambda_methods\n\n def _get_feature_methods(self):\n self.nft_out = 0\n self.dim_out = 0\n self.features = []\n self.ft_kwargs = []\n self.ft_list = []\n self.groups = []\n self.lambda_methods = []\n self.add_feature(self.ft_names)\n\n def transform(self,X,y=None):\n \"\"\"\n Transform data according to selected rules. 
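\n\n        Example (illustrative sketch; kwarg values in feature strings are\n        parsed with ast.literal_eval, so string-valued kwargs need quotes):\n            >>> rep = HRrep(\"mix_g\", \"mis_g|method='enthalpy'\", \"lel_g|l='d'\")\n            >>> X_hr, y_hr = rep.fit(X_df, y_df).transform(X_df, y_df)\n            >>> X_hr.shape   # (N, 3) -- one column per scalar rule here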
\n \"\"\"\n at_nums = self.get_els(X)\n X = X.values\n y = y if y is None else y.values\n if self.Xy_share_labels:\n # Start the output array\n mask = self._gen_mask(X)\n X_out = np.zeros((mask.sum(),0))\n for i,(ft,kwargs) in enumerate(zip(self.features,self.ft_kwargs)):\n X_out = np.c_[X_out,ft(self,X,at_nums,y,**kwargs)]\n if y is not None:\n _,y_out = self.extend_original(X,y)\n else:\n y_out = None\n elif self.multi_output:\n # Start the output array\n if y is not None:\n _,y_out = self.multi_2_single_output(X,y)\n X_out = np.zeros((y_out.shape[0],0))\n else:\n X_out = np.zeros((X.shape[0]*self.output_cats,0))\n y_out = None\n for i,(ft,kwargs) in enumerate(zip(self.features,self.ft_kwargs)):\n X_out = np.c_[X_out,ft(self,X,at_nums,y,**kwargs)]\n else:\n # Start the output array\n X_out = np.zeros((X.shape[0],0))\n for i,(ft,kwargs) in enumerate(zip(self.features,self.ft_kwargs)):\n X_out = np.c_[X_out,ft(self,X,at_nums,y,**kwargs)]\n if y is not None:\n y_out = y.reshape(-1,1)\n else:\n y_out = None\n return X_out,y_out\n \n def fit_transform(self,X,y=None):\n return self.fit(X,y).transform(X,y)\n\nclass Logy():\n \"\"\"log transformation of predicted property.\"\"\"\n def __init__(self,trans_distribution=False):\n self.trans_dist = trans_distribution\n\n def fit(self,k):\n return self\n\n @staticmethod\n def transform(k):\n return np.log(k)\n\n @staticmethod\n def inverse_transform(p,std=None):\n return np.exp(p)\n\n @staticmethod\n def inverse_std(p,p_std):\n p_std = p_std.reshape(p.shape)\n return p_std*np.exp(p)\n\n @staticmethod\n def inverse_cov(p,p_cov):\n p = p.reshape(-1,1)\n return (np.exp(p)@np.exp(p).T)*p_cov\n\n @staticmethod\n def transform_cov(k,k_cov):\n k = k.reshape(-1,1)\n return k_cov/(k@k.T)\n\nclass ArcTanh():\n \"\"\"arctanh transformation of predicted property.\"\"\"\n def __init__(self,trans_distribution=False):\n self.trans_dist = trans_distribution\n\n def fit(self,f):\n return self\n\n @staticmethod\n def transform(f):\n return np.arctanh(2*f-1.)\n\n @staticmethod\n def inverse_transform(q,q_std=None):\n return 0.5*(1. + np.tanh(q))\n\n @staticmethod\n def inverse_std(q,q_std):\n q_std = q_std.reshape(q.shape)\n return 0.5*q_std*np.cosh(q)**-2\n\n @staticmethod\n def inverse_cov(q,q_cov):\n q = q.reshape(-1,1)\n return 0.25*((np.cosh(q)**-2)@(np.cosh(q)**-2).T)*q_cov\n \n @staticmethod\n def transform_cov(f,f_cov):\n f = f.reshape(-1,1)\n return 0.25*f_cov/((f*(1-f))@((f*(1-f)).T))\n\nclass SplitByGeq():\n \"\"\"\n Split X,y data into two datasets, depending on value of y. \n \"\"\"\n def __init__(self,split=1.0,max_n_per_split=True):\n \"\"\"\n Parameters\n ----------\n split (float) : Value which is used to split dataset by y. \n max_n_per_split (bool) : Whether to add the values==split data pts into each split.\n \"\"\"\n self.num_splits = 2\n self.split_ = split\n self.max_ = max_n_per_split\n\n\n def split(self,X,y):\n y = y.reshape(-1)\n inds_u = (y >= self.split_)\n inds_l = (y <= self.split_) if self.max_ else ~inds_u \n return (X[inds_l],y[inds_l]),(X[inds_u],y[inds_u])\n\nclass Bagging():\n \"\"\"\n Implements bagging via a random split of the features. 
\n \"\"\"\n def __init__(self,num_splits=24,max_features=0.8,min_features=3,seed=1921):\n self.num_splits = num_splits\n self.max_features = max_features\n self.min_features = min_features\n self.seed = seed\n self.rng = np.random.default_rng(seed)\n self.split_inds = []\n\n def reseed(self,seed=None):\n new_seed = self.seed+1 if seed is None else seed\n self.rng = np.random.default_rng(seed)\n\n def _sample(self,a,n,X,cov):\n inds = np.arange(X.shape[1])\n used = [a]\n # Use covariance to weight choice of next feature\n for i in range(n):\n wts = np.abs(cov[used].sum(0))**-1\n wts[used] = 0.\n wts /= wts.sum()\n nxt = self.rng.choice(inds,p=wts)\n used += [nxt]\n return used\n\n def fit(self,X,y):\n N,n_fts = X.shape\n max_fts = int(np.around(self.max_features*n_fts,0)) if isinstance(self.max_features,float) else self.max_features\n min_fts = int(np.around(self.min_features*n_fts,0)) if isinstance(self.min_features,float) else self.min_features\n # Estimate covariance in order to choose least correlated features\n cov = np.cov(X,rowvar=False)\n cov /= np.sqrt(np.diag(cov).reshape(-1,1)@np.diag(cov).reshape(1,-1))\n starts = self.rng.integers(0,X.shape[1],self.num_splits)\n lens = self.rng.integers(min_fts,max_fts,self.num_splits)\n for a,n in zip(starts,lens):\n self.split_inds += [self._sample(a,n,X,cov)]\n\n def transform(self,X,y):\n splits = ()\n for inds in self.split_inds:\n splits += ((X[:,inds],y),)\n return splits\n\nclass ModelClass():\n \"\"\"\n A simple model class, something like sklearn.pipeline.Pipeline,\n but with explicit pipeline steps. \n\n Parameters\n ----------\n phys_transformer: Should be something with all the methods of HRrep or None.\n splitter : Should have a fit method. Also a transform method, taking X,y as inputs, returning ((X,y),...). Also needs a .num_splits attribute\n y_transformer : Simple transformer, needs inverse_transform method.\n y_scaler : Simple scaler, needs inverse_transform method.\n X_scaler : Like sklearn.preprocessing.StandardScaler\n X_transformer : Simple transformer (needs fit,transform methods).\n regressor : Any sklearn regression model class should work.\n committee_method: Either \"weighted\" or \"best\". 
Only used when there is a splitter.\n """\n def __init__(self,phys_transformer=None,\n splitter = None,\n y_transformer=None,\n y_scaler=None,\n X_scaler=None,\n X_transformer=None,\n regressor=None,\n n_jobs=-1,\n bagging_method = "weighted"):\n self.phys_transformer=deepcopy(phys_transformer)\n self.y_transformer=deepcopy(y_transformer)\n # splitter stuff\n self.bagging_method = bagging_method\n self.splitter = splitter\n if self.splitter is not None:\n self.num_splits = self.splitter.num_splits\n else:\n self.num_splits = 1\n # Add dicts for all other methods\n self.y_scaler = deepcopy(y_scaler)\n self.X_scaler = deepcopy(X_scaler)\n self.X_transformer = deepcopy(X_transformer)\n self.regressor = {}\n if regressor is not None:\n for a in range(self.num_splits):\n self.regressor[a] = deepcopy(regressor)\n self.n_jobs = n_jobs\n # Flags\n self.fitted = False\n\n def _fit_by_split(self,X,y,a):\n if self.regressor[a] is not None:\n self.regressor[a].fit(X,y)\n\n def fit(self,X,y):\n """\n Fit model.\n """\n X = X.copy() ; y = y.copy()\n if self.phys_transformer is not None:\n X,y = self.phys_transformer.fit(X,y).transform(X,y)\n if self.y_transformer is not None:\n y = self.y_transformer.fit(y).transform(y)\n if self.y_scaler is not None:\n y = y.reshape(-1,1)\n y = self.y_scaler.fit(y).transform(y)\n if self.X_scaler is not None:\n X = self.X_scaler.fit(X).transform(X)\n if self.X_transformer is not None:\n X = self.X_transformer.fit(X).transform(X)\n if self.splitter is not None:\n self.splitter.fit(X,y)\n splits = self.splitter.transform(X,y)\n else:\n splits = ((X,y),)\n if self.n_jobs != 1:\n Parallel(n_jobs=self.n_jobs,backend="threading")(delayed(self._fit_by_split)(X,y,a) \n for a,(X,y) in enumerate(splits))\n else:\n for a,(X,y) in enumerate(splits):\n self._fit_by_split(X,y,a)\n self.fitted = True\n return self\n \n def _predict_by_split(self,X,a,complete_transform=True):\n if not self.fitted:\n raise RuntimeError("This ModelClass object has not been fitted.")\n X = X.copy()\n if self.regressor[a] is not None:\n y_prd_0,cov = self.regressor[a].predict(X,return_std=False,\n return_cov=True)\n y_unc_0 = np.sqrt(np.diag(cov))\n if self.y_scaler is not None:\n y_prd_0 = self.y_scaler.inverse_transform(y_prd_0)\n y_prd_0 = y_prd_0.reshape(-1)\n # Uncertainty\n scaler = deepcopy(self.y_scaler)\n scaler.set_params(with_mean=False)\n y_unc_0 = scaler.inverse_transform(y_unc_0)\n y_unc_0 = y_unc_0.reshape(-1)\n # Covariance\n cov *= scaler.scale_**2\n cov_inv = inv(cov)\n # Can skip transformation step.\n if not complete_transform:\n return y_prd_0.reshape(-1),y_unc_0.reshape(-1),cov,cov_inv\n if self.y_transformer is not None:\n y_prd_1 = self.y_transformer.inverse_transform(y_prd_0,y_unc_0)\n y_unc_1 = self.y_transformer.inverse_std(y_prd_0,y_unc_0)\n y_prd_0 = y_prd_1.copy() ; y_unc_0 = y_unc_1.copy()\n return y_prd_0.reshape(-1),y_unc_0.reshape(-1)\n\n def predict(self,X,return_std=False,return_cov=False):\n """\n Make predictions using the fitted model. 
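\n Pre-regressor transforms are applied, each split's regressor is\n evaluated, and the split predictions are recombined according to\n bagging_method when a splitter is present.\n\n Example (an illustrative sketch; gpr stands for any regressor whose\n predict supports return_cov=True, e.g. sklearn's\n GaussianProcessRegressor):\n model = ModelClass(X_scaler=StandardScaler(),regressor=gpr)\n model.fit(X_train,y_train)\n y_prd,y_unc = model.predict(X_test,return_std=True) 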
\n \"\"\"\n if not self.fitted:\n raise RuntimeError(\"This ModelClass object has not been fitted.\")\n X_orig = X.copy()\n # Pre-regressor transformations\n if self.phys_transformer is not None:\n X,_ = self.phys_transformer.transform(X)\n if self.X_scaler is not None:\n X = self.X_scaler.transform(X)\n if self.X_transformer is not None:\n X = self.X_transformer.transform(X)\n if self.splitter is not None:\n splits = self.splitter.transform(X,None)\n else:\n splits = ((X,None),)\n if self.n_jobs != 1:\n p_out = Parallel(n_jobs=self.n_jobs,backend=\"threading\")(delayed(\n self._predict_by_split)(X_a,a,complete_transform=False)\n for a,(X_a,_) in enumerate(splits)\n )\n else:\n p_out = []\n for a,(X_a,_) in enumerate(splits):\n p_out += [self._predict_by_split(X_a,a,complete_transform=False)]\n\n # recombine\n if self.splitter is not None:\n y_prd = np.zeros(X.shape[0])\n cov_inv = np.zeros((X.shape[0],X.shape[0]))\n cov = cov_inv.copy()\n if self.bagging_method == \"weighted\":\n for model_result in p_out:\n y_prd_a,y_unc_a,cov_a,cov_inv_a = model_result\n cov_inv += cov_inv_a\n y_prd += cov_inv_a@y_prd_a\n cov = inv(cov_inv)\n y_prd = cov@y_prd\n y_unc = np.diag(cov)\n elif self.bagging_method == \"average\":\n for model_result in p_out:\n y_prd_a,y_unc_a,cov_a,cov_inv_a = model_result\n y_prd += y_prd_a\n cov += cov_a\n y_prd /= self.num_splits\n cov /= self.num_splits\n y_unc = np.diag(cov)\n else:\n y_prd,y_unc,cov,cov_inv = p_out[0]\n # Finally use y_transformer\n if self.y_transformer is not None:\n y_prd_1 = self.y_transformer.inverse_transform(y_prd,y_unc)\n y_unc_1 = self.y_transformer.inverse_std(y_prd,y_unc)\n y_prd = y_prd_1.copy() ; y_unc = y_unc_1.copy() \n cov_1 = self.y_transformer.inverse_cov(y_prd,cov)\n cov = cov_1.copy()\n # self.phys_transformer appears twice because y needs to be reshaped.\n # cov matrix not reshaped. \n if self.phys_transformer is not None:\n y_out = self.phys_transformer.revert_shape(X_orig,y_prd)\n u_out = self.phys_transformer.revert_shape(X_orig,y_unc)\n else: \n y_out = None\n u_out = None\n # Return\n if self.phys_transformer is not None and self.phys_transformer.Xy_share_labels:\n if return_std:\n if return_cov:\n return y_prd,y_out,y_unc,u_out,cov\n else:\n return y_prd,y_out,y_unc,u_out\n else:\n if return_cov:\n return y_prd,y_out,cov\n else:\n return y_prd,y_out\n else:\n if return_std:\n if return_cov:\n return y_prd,y_unc,cov\n else:\n return y_prd,y_unc\n else:\n if return_cov:\n return y_prd,cov\n else:\n return y_prd\n\n def score(self,X,y_true,method=r2_score,bcl_ignore_0s=True):\n \"\"\"\n Score the model. \n\n Parameters\n ----------\n X (pd.Dataframe) : Data descriptors\n y_true (pd.Dataframe) : Data labels. \n method : Method of scoring data. \n bcl_ignore_0s (bool) : When data shares common labels, whether or not to ignore 0s for purposes of calculating R^2. \n \"\"\"\n \n if self.phys_transformer is not None and self.phys_transformer.Xy_share_labels:\n y_prd_0,y_out = self.predict(X)\n _,y_tru_0 = self.phys_transformer.extend_original(X.values,y_true.values)\n r2_0 = method(y_tru_0,y_prd_0)\n # Calculate R^2 for common labels. 
\n r2_bcl = pd.Series(dtype=np.float64)\n X = X.loc[:,(\"Composition\",)]\n for cl in X.columns:\n if bcl_ignore_0s:\n x = X[cl].values.copy()\n y_t_i = y_true[cl].values.copy() ; y_p_i = y_out[cl].values.copy() \n y_t_i = y_t_i[~(x==0.)]\n y_p_i = y_p_i[~(x==0.)]\n if y_t_i.shape[0] < 2:\n r2 = np.nan\n else:\n r2 = method(y_t_i,y_p_i)\n else:\n r2 = method(y_true[cl].values,y_out[cl].values)\n r2_bcl = r2_bcl.append(pd.Series({cl:r2}))\n else:\n y_prd_0 = self.predict(X)\n r2_0 = method(y_true,y_prd_0)\n r2_bcl = None\n return r2_0, r2_bcl\n\n def save(self):\n return deepcopy(self)\n\nclass GModelClass(ModelClass):\n def __init__(self,phys_transformer=None,\n splitter = None,\n y_transformer=None,\n y_scaler=None,\n X_scaler=None,\n X_transformer=None,\n regressor_cls=None,\n likelihood = None,\n mean_model = None,\n covar_model = None,\n optimizer_lr = 0.1,\n max_iter = 1e5,\n conv_tol = 1.e-4,\n optimizer_method = \"adam\",\n n_restarts = 1,\n restart_method = \"random\",\n n_jobs=1,\n bagging_method = \"weighted\",\n use_cuda = True,\n seed=1958):\n super(GModelClass,self).__init__(phys_transformer,splitter,y_transformer,y_scaler,\n X_scaler,X_transformer,None,n_jobs,bagging_method)\n \n if use_cuda:\n if torch.cuda.is_available():\n self.use_cuda = True\n self.n_jobs = 1\n else:\n self.use_cuda = False\n warnings.warn(\"CUDA is not currently available on this device.\",UserWarning)\n else:\n self.use_cuda = False\n self.regressor_cls = regressor_cls\n self.likelihood = likelihood\n self.mean_model = mean_model\n self.covar_model = covar_model\n self.max_iter = int(max_iter)\n self.optimizer_lr = optimizer_lr if hasattr(optimizer_lr,\"__len__\") else [optimizer_lr]\n self.conv_tol = conv_tol\n self.n_restarts = n_restarts\n self.restart_method = restart_method\n self.optimizer_method = optimizer_method.lower()\n self.states = {}\n self.training_data = {}\n self.fit_history = {}\n\n self.rng = np.random.default_rng(seed)\n\n def _fit_by_split(self,X,y,a):\n if self.regressor_cls is not None:\n X = torch.from_numpy(X)\n y = torch.from_numpy(y.reshape(-1))\n likelihood = deepcopy(self.likelihood)\n if self.mean_model is not None and self.covar_model is not None:\n model = self.regressor_cls(X,y,likelihood,\n self.mean_model,self.covar_model)\n elif self.mean_model is not None:\n model = self.regressor_cls(X,y,likelihood,\n mean_module=self.mean_model)\n elif self.covar_model is not None:\n model = self.regressor_cls(X,y,likelihood,\n covar_module=self.covar_model)\n else:\n model = self.regressor_cls(X,y,likelihood)\n if self.use_cuda:\n X = X.cuda()\n y = y.cuda()\n likelihood = likelihood.cuda()\n model = model.cuda()\n # Put into training mode\n model.train()\n likelihood.train()\n # \"Loss\" for GPs - the marginal log likelihood\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)\n # Def optimization to run in loop.\n def optimise(lr):\n if self.optimizer_method == \"adam\":\n # Use the adam optimizer\n optimizer = torch.optim.Adam(model.parameters(), lr=lr) # Includes GaussianLikelihood parameters\n elif self.optimizer_method == \"lbfgs\":\n # Use the L-BFGS-B optimiser. 
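\n # (torch.optim.LBFGS re-evaluates the objective several times\n # per step, which is why a closure is constructed inside the\n # optimisation loop below.)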
\n optimizer = torch.optim.LBFGS(model.parameters(),lr=lr)\n iter_ = 0 ; old_loss = 1.e8 ; rel_change = 1.e8\n while iter_ < self.max_iter and rel_change > self.conv_tol:\n # Zero gradients from previous iteration\n optimizer.zero_grad()\n # Output from model\n output = model(X)\n # Calc loss and backprop gradients\n loss = -mll(output, y)\n loss.backward()\n if self.optimizer_method=="lbfgs":\n # Need this for lbfgs optimizer\n def closure():\n optimizer.zero_grad()\n output = model(X)\n loss = -mll(output,y)\n loss.backward()\n return loss\n optimizer.step(closure)\n else:\n optimizer.step()\n iter_ += 1\n # stopping criteria\n new_loss = loss.item()\n rel_change = abs((old_loss - new_loss)/new_loss)\n old_loss = 1.*new_loss\n if iter_ == self.max_iter:\n warnings.warn("Max number of iterations ({:.2e}) reached.".format(self.max_iter),UserWarning)\n # Run optimisation, restart as many times as n_restarts\n # Store these as we loop over lr, restarts\n init_r_lengths = model.state_dict().get("covar_module.base_kernel.raw_lengthscale",None).clone()\n loss_list = np.zeros((self.n_restarts,len(self.optimizer_lr)))\n r_lengths_dict = {}\n for lr_trial,lr in enumerate(self.optimizer_lr):\n model.initialize(\n **{"covar_module.base_kernel.raw_lengthscale":init_r_lengths.clone()})\n for repeat in range(self.n_restarts):\n optimise(lr)\n r_lengths = model.state_dict().get("covar_module.base_kernel.raw_lengthscale",None).clone()\n constraint = model.covar_module.base_kernel.raw_lengthscale_constraint\n t_lengths = constraint.transform(r_lengths)\n loss_list[repeat,lr_trial] = -mll(model(X), y) + (t_lengths < 4.e3).sum()\n r_lengths_dict[(repeat,lr_trial)] = r_lengths\n # if r_lengths is None:\n # break\n # Transform the true lengthscales\n if self.restart_method == "random":\n new_r_lengths = r_lengths[:,self.rng.permutation(r_lengths.shape[1])]\n else:\n new_t_lengths = t_lengths**1.5\n new_r_lengths = constraint.inverse_transform(new_t_lengths)\n model.initialize(\n **{"covar_module.base_kernel.raw_lengthscale":new_r_lengths.clone()})\n # Use the best lengths for final model.\n if r_lengths is not None:\n use_rep = tuple(np.argwhere(loss_list==loss_list.min())[0])\n model.initialize(\n **{"covar_module.base_kernel.raw_lengthscale":r_lengths_dict[use_rep]})\n if self.use_cuda:\n self.regressor[a] = (deepcopy(model.cpu()),deepcopy(likelihood.cpu()))\n self.states[a] = (model.cpu().state_dict(),likelihood.cpu().state_dict())\n else:\n self.regressor[a] = (deepcopy(model),deepcopy(likelihood))\n self.states[a] = (model.state_dict(),likelihood.state_dict())\n self.training_data[a] = [X.cpu(),y.cpu()]\n # Store fit history\n self.fit_history[a] = {"learning rates":self.optimizer_lr,\n "losses":loss_list,\n "kernel_lengthscales":r_lengths_dict}\n\n def _predict_by_split(self,X,a,complete_transform=True):\n if not self.fitted:\n raise RuntimeError("This GModelClass object has not been fitted.")\n X = torch.from_numpy(X)\n model,likelihood = self.regressor[a]\n # Switch into predictive posterior mode\n model.eval()\n likelihood.eval()\n with torch.no_grad(),gpytorch.settings.fast_pred_var():\n observed_pred = likelihood(model(X))\n y_prd_0 = observed_pred.mean\n cov = observed_pred.covariance_matrix\n # Convert to numpy now\n y_prd_0 = y_prd_0.numpy()\n cov = cov.detach().numpy() # Unsure why this is needed. It appeared in an error code once. 
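\n # (.detach() defensively strips any remaining autograd graph before\n # the numpy conversion; torch refuses to convert a tensor that\n # still requires grad.)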
\n y_unc_0 = np.sqrt(np.diag(cov))\n # ~~~~~~~~~~ Copied from parent method ~~~~~~~~~~ #\n if self.y_scaler is not None:\n y_prd_0 = self.y_scaler.inverse_transform(y_prd_0)\n y_prd_0 = y_prd_0.reshape(-1)\n # Uncertainty\n scaler = deepcopy(self.y_scaler)\n scaler.set_params(with_mean=False)\n y_unc_0 = scaler.inverse_transform(y_unc_0)\n y_unc_0 = y_unc_0.reshape(-1)\n # Covariance\n cov *= scaler.scale_**2\n cov_inv = inv(cov)\n # Can skip transformation step.\n if not complete_transform:\n return y_prd_0.reshape(-1),y_unc_0.reshape(-1),cov,cov_inv\n if self.y_transformer is not None:\n y_prd_1 = self.y_transformer.inverse_transform(y_prd_0,y_unc_0)\n y_unc_1 = self.y_transformer.inverse_std(y_prd_0,y_unc_0)\n y_prd_0 = y_prd_1.copy() ; y_unc_0 = y_unc_1.copy()\n return y_prd_0.reshape(-1),y_unc_0.reshape(-1)\n\n def save(self):\n """\n Return a pickle-able version of the model instance.\n """\n inter_copy = deepcopy(self)\n if self.fitted:\n del inter_copy.regressor\n inter_copy.regressor = {}\n return inter_copy\n \n def reload(self):\n """\n After loading a 'saved' model instance, recreate the \n models from the stored state_dicts. \n """\n for a in range(self.num_splits):\n X,y = self.training_data[a]\n likelihood = deepcopy(self.likelihood)\n if self.mean_model is not None and self.covar_model is not None:\n model = self.regressor_cls(X,y,likelihood,\n self.mean_model,self.covar_model)\n elif self.mean_model is not None:\n model = self.regressor_cls(X,y,likelihood,\n mean_module=self.mean_model)\n elif self.covar_model is not None:\n model = self.regressor_cls(X,y,likelihood,\n covar_module=self.covar_model)\n else:\n model = self.regressor_cls(X,y,likelihood)\n m_state,l_state = self.states[a]\n model.load_state_dict(m_state)\n likelihood.load_state_dict(l_state)\n self.regressor[a] = (model.cpu(),likelihood.cpu())\n\n\ndef part_coeffs(c1,c2,tol=0.001,composition=None):\n """\n Calculate partitioning coefficients for arbitrary phases.\n \n Parameters\n ----------\n c1 (pd.Dataframe) : Numerator phase composition.\n c2 (pd.Dataframe) : Denominator phase composition.\n tol (float) : Assumed tolerance or uncertainty in experimental measurements.\n composition (pd.Dataframe) : Corresponding alloy compositions. \n """\n c1_ = c1.values ; c2_ = c2.values\n pc_ = np.ma.masked_where(c1_==0.,c1_).filled(tol)\\\n /np.ma.masked_where(c2_==0.,c2_).filled(tol)\n if composition is not None:\n pc_ = np.ma.masked_where(composition.values==0.,pc_).filled(np.nan)\n pc = c1.copy()\n pc.loc[:] = pc_\n return pc\n\n#%% Precipitate fraction inference\ndef bayesian_f(f_prior,f_uncp,X_p1,X_u1,X_p2,X_u2,X):\n """\n Bayesian inference of a new value for the precipitate fraction, as proposed in \n the microstructure paper.\n\n Parameters\n ----------\n f_prior (ndarray) : Direct predictions for the fraction of 2nd phase, prior for this method.\n f_uncp (ndarray) : Uncertainty associated with prediction above.\n X_p1 (ndarray) : Predictions for the compositions of 1st phase.\n X_u1 (ndarray) : Uncertainties associated with above prediction. \n X_p2 (ndarray) : Predictions for the composition of 2nd phase. \n X_u2 (ndarray) : Uncertainties associated with above prediction. \n X (ndarray) : Input composition of alloys. 
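\n\n The likelihood assumes the lever rule X = (1-f)*X_p1 + f*X_p2 holds\n element-wise with Gaussian uncertainties; combined with the Gaussian\n prior N(f_prior,f_uncp**2) this yields the closed-form posterior mean\n and standard deviation returned below.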
\n\n Returns\n -------\n f_infr (ndarray) : New inferred values for fractions of 2nd phase.\n f_unci (ndarray) : Associated uncertainties.\n """\n unc_est2 = f_prior[:,np.newaxis]**2*X_u2**2\\\n +(1.-f_prior[:,np.newaxis])**2*X_u1**2\n denom = (1+np.nansum((X_p2-X_p1)**2*f_uncp**2/unc_est2,axis=1))**-1\n f_infr = (f_prior+np.nansum((X-X_p1)*(X_p2-X_p1)*f_uncp**2/unc_est2,axis=1))\\\n *denom\n f_unci = f_uncp*np.sqrt(denom)\n return f_infr,f_unci\n\n#%% Structure comparison\nclass CrystalRep():\n """\n Generate the representation of the crystal structure\n of a given alloy.\n """\n # Pre-calc Thomas-Fermi wavevector for shielding\n k_per_n = np.sqrt(4*(3/np.pi)**(1/3)/0.5291772)\n\n def __init__(self,elements,\n dr=0.01,cutoff=10.0,\n smear_factor=0.1,\n use_shielding=True,\n use_valence="non-core"):\n self.dr = dr\n self.rc = cutoff\n self.smear_f = smear_factor\n self.shield = use_shielding\n self.use_v = use_valence\n # Elements\n self.els = np.array(elements)\n self.at_ns = [data.atomic_numbers.get(el) for el in self.els]\n # Pre-calc a few common shell numbers/radii\n fcc = bulk("X","fcc",1.)\n R_fcc,N_fcc = shell_radii(fcc,self.rc/2.)\n bcc = bulk("X","bcc",1.)\n R_bcc,N_bcc = shell_radii(bcc,self.rc/2.)\n hcp = bulk("X","hcp",1.,1.633)\n R_hcp,N_hcp = shell_radii(hcp,self.rc/1.5)\n self.R_fcc = R_fcc ; self.N_fcc = N_fcc\n self.R_bcc = R_bcc ; self.N_bcc = N_bcc\n self.R_hcp = R_hcp ; self.N_hcp = N_hcp\n # Densities\n self.n_fcc = fcc.get_global_number_of_atoms()/fcc.get_volume()\n self.n_bcc = bcc.get_global_number_of_atoms()/bcc.get_volume()\n self.n_hcp = hcp.get_global_number_of_atoms()/hcp.get_volume()\n # Get "valences" and radii\n self.pt = np.genfromtxt("hr_table.csv",delimiter=",",\n missing_values=("nan",),filling_values=(np.nan,),\n skip_header=1,usecols=(2,3,4,5,8,9,10))\n self.r = self.pt.T[0][self.at_ns]\n if self.use_v!="non-core":\n self.v = self.pt.T[1][self.at_ns]\n else:\n self.v = self.pt.T[6][self.at_ns] \n\n def __call__(self,X,crystal="fcc"):\n """\n Calculate the representation of an alloy X.\n\n Parameters\n ----------\n X (ndarray) : ndarray representing the alloy. \n Alternatively use a str representing an element.\n crystal (str): Type of crystal.\n """\n X_vec = 1.*(self.els==X) if isinstance(X,str) else X.copy()\n mean_v = X_vec@self.v if self.use_v else 1.\n mean_r = X_vec@self.r\n # Check what crystal we're using.\n if isinstance(X,str):\n struc = bulk(X)\n R,N = shell_radii(struc,self.rc)\n n = mean_v*struc.get_global_number_of_atoms()/struc.get_volume()\n elif crystal=="fcc":\n mean_a = mean_r*4/np.sqrt(2)\n R,N = self.R_fcc*mean_a,self.N_fcc\n n = mean_v*self.n_fcc/mean_a**3\n elif crystal=="hcp":\n mean_a = mean_r*2\n R,N = self.R_hcp*mean_a,self.N_hcp\n n = mean_v*self.n_hcp/mean_a**3\n elif crystal=="bcc":\n mean_a = mean_r*4/np.sqrt(3)\n R,N = self.R_bcc*mean_a,self.N_bcc\n n = mean_v*self.n_bcc/mean_a**3\n else:\n mean_a = 2.*mean_r # <- Probably wrong\n struc = bulk("X",crystal,mean_a)\n R,N = shell_radii(struc,self.rc)\n # density of electrons\n n = mean_v*struc.get_global_number_of_atoms()/struc.get_volume()\n # Keep shells below cutoff\n N = N[R<self.rc]\n x = x_full[x_full>0.] 
# input composition\n x1 = x*k_p[:n]\n x2 = x*k_p[n:]\n # initial covariance matrix, converted to transformed variable form\n cov = block_diag(Logy.transform_cov(k_p,k_c),\n ArcTanh.transform_cov(f_p,f_c))\n # Simply import relevant matrix / vector for solutions as lambda functions\n A,b = correction_Ab_system[n]\n \n cov_new_inv = lambda_*inv(cov) + A(*x,*x1,*x2,*f_p,1.0,1.0)\n cov_new = inv(cov_new_inv)\n var_corr = cov_new @ b(*x,*x1,*x2,*f_p,1.0,1.0)\n # Transform to correct variables. \n var_corr = var_corr.flatten()\n p_corr = Logy.transform(k_p) + var_corr[:n*m]\n q_corr = ArcTanh.transform(f_p) + var_corr[-1:]\n k_out = Logy.inverse_transform(p_corr).flatten()\n f_out = ArcTanh.inverse_transform(q_corr).flatten()\n # Transform covariance \n tr_vec = np.concatenate((k_out,2*f_out*(1.-f_out)))\n cov_out = (tr_vec.reshape(-1,1)@tr_vec.reshape(1,-1))*cov_new\n # Store\n k_prd_new[0].append(k_out[:n])\n k_unc_new[0].append(np.diag(cov_out[:n]))\n k_prd_new[1].append(k_out[n:n*2])\n k_unc_new[1].append(np.diag(cov_out[n:n*2]))\n f_prd_new.append(f_out)\n f_unc_new.append(np.diag(cov_out)[-1:])\n cov_list.append(cov)\n k_prd_new[0] = np.concatenate(k_prd_new[0])\n k_unc_new[0] = np.concatenate(k_unc_new[0])\n k_prd_new[1] = np.concatenate(k_prd_new[1])\n k_unc_new[1] = np.concatenate(k_unc_new[1])\n f_prd_new = np.concatenate(f_prd_new)\n f_unc_new = np.concatenate(f_unc_new)\n if rtn_cov:\n return tuple(k_prd_new),tuple(k_unc_new),f_prd_new,f_unc_new,cov_list\n else:\n return tuple(k_prd_new),tuple(k_unc_new),f_prd_new,f_unc_new\n\ndef bayesian_corr_2p(X,k_prd,k_scov,f_prd,f_unc,rtn_cov=False,tol=0.05):\n \"\"\"\n NOW A MODIFICATION OF bayesian_corr(). Apply a Bayesian correction to the microstructure predictions for\n 2-phase alloys. This incorporates the likelihood of the model\n fulfilling certain physical constraints with the prior from the \n ML models in order to produce corrected predictions for the part-\n -itioning coefficients.\n\n Parameters\n ----------\n k_scov (list) : ragged list of covariance matrices for each alloy.\n k_prd (list) : ragged list of predicted pcs for each alloy.\n X (pd.Dataframe) : Dataframe of input alloy compositions.\n f_prd (ndarray) : Predicted phase fractions (phase 2)\n f_unc (ndarray) : Associated phase uncertainties.\n rtn_cov (bool) : Whether or not to return covariance matrices as output. \n tol (float) : Tolerance for soft constraint on element-total as % of overall amount. \n \"\"\"\n k_prd_new = [[],[]] ; k_unc_new = [[],[]]\n f_prd_new = [] ; f_unc_new = []\n cov_list = []\n for x_full,k_c,k_p,f_p,f_c in zip(X.values,k_scov,k_prd,f_prd,f_unc):\n f_p = np.atleast_1d(f_p) ; f_c = np.atleast_2d(f_c)**2\n m = 2 # number of phases present\n n = len(k_p)//m # number of elements present.\n x = x_full[x_full>0.] # input composition\n x1 = x*k_p[:n]\n x2 = x*k_p[n:]\n # initial covariance matrix, converted to transformed variable form\n cov = block_diag(Logy.transform_cov(k_p,k_c),\n ArcTanh.transform_cov(f_p,f_c))\n # Simply import relevant matrix / vector for solutions as lambda functions\n A_l,b_l = correction_hc_Ab_system[n]\n A = A_l(*x,*x1,*x2,*f_p,tol)\n b = b_l(*x,*x1,*x2,*f_p,tol)\n corr = inv(block_diag(inv(cov),np.zeros((m,m))) + A)@b\n # The covariance is a bit different though\n cov_new_inv = inv(cov) + A[:-m,:-m]\n cov_new = inv(cov_new_inv)\n # Transform to correct variables. 
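\n # (dp perturbs p = log(k), so k_out = k_p*exp(dp) ~ k_p*(1+dp) to\n # first order; dq perturbs q = arctanh(2f-1), and the f_out expression\n # below is the exact tanh addition formula for q -> q+dq.)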
\n corr = corr.flatten()\n dp = corr[:n*m]\n dq = corr[n*m:n*m+1]\n k_out = k_p * (1+dp)\n f_out = (f_p*(1+np.tanh(dq)))/(1+(2*f_p-1)*np.tanh(dq))\n k_out = k_out.flatten() ; f_out = f_out.flatten()\n # Same transformations for uncertainties\n p_unc = np.sqrt(np.diag(cov_new[:n*m,:n*m]))\n q_unc = np.sqrt(cov_new[n*m:n*m+1,n*m:n*m+1])\n k_unc_out = p_unc*k_out\n f_unc_out = q_unc*2*f_out*(1.-f_out)\n k_unc_out = k_unc_out.flatten() ; f_unc_out = f_unc_out.flatten()\n # Transform covariance \n tr_vec = np.concatenate((k_out,2*f_out*(1.-f_out)))\n cov_out = (tr_vec.reshape(-1,1)@tr_vec.reshape(1,-1))*cov_new[:n*m+1,:n*m+1]\n # Store\n k_prd_new[0].append(k_out[:n])\n k_unc_new[0].append(k_unc_out[:n])\n k_prd_new[1].append(k_out[n:n*2])\n k_unc_new[1].append(k_unc_out[n:n*2])\n f_prd_new.append(f_out)\n f_unc_new.append(f_unc_out)\n cov_list.append(cov_out)\n k_prd_new[0] = np.concatenate(k_prd_new[0])\n k_unc_new[0] = np.concatenate(k_unc_new[0])\n k_prd_new[1] = np.concatenate(k_prd_new[1])\n k_unc_new[1] = np.concatenate(k_unc_new[1])\n f_prd_new = np.concatenate(f_prd_new)\n f_unc_new = np.concatenate(f_unc_new)\n if rtn_cov:\n return tuple(k_prd_new),tuple(k_unc_new),f_prd_new,f_unc_new,cov_list\n else:\n return tuple(k_prd_new),tuple(k_unc_new),f_prd_new,f_unc_new\n\ndef bayesian_corr(X,k_prd,k_scov,f_prd,f_scov):\n \"\"\"\n Apply a Bayesian correction to the microstructure predictions for\n multi-phase alloys. This incorporates the likelihood of the model\n fulfilling certain physical constraints with the prior from the \n ML models in order to produce corrected predictions for the part-\n -itioning coefficients and phases. \n\n Parameters\n ----------\n X (pd.Dataframe) : Dataframe of input alloy compositions.\n k_scov (list) : ragged list of covariance matrices for each alloy's partitioning coeffs.\n k_prd (list) : ragged list of predicitions for each alloy's partitioning coeffs.\n f_scov (ndarray) : ragged list of covariance matrices for each alloy's phases. \n f_prd (ndarray) : ragged list of predicitions for each alloy's phases.\n \"\"\"\n k_prd_new = [] ; k_unc_new = [] ; f_prd_new = [] ; f_unc_new = []\n for x_full,k_c,k_p,f_p,f_c in zip(X.values,k_scov,k_prd,f_prd,f_scov):\n m = len(f_p) # number of phases present\n n = len(k_p)//m # number of elements present.\n x = x_full[x_full>0.] # input composition\n # initial covariance matrix, converted to transformed variable form\n cov = block_diag(Logy.transform_cov(k_p,k_c),\n ArcTanh.transform_cov(f_p,f_c))\n # Correction terms relating to sum(components in phase) = 1.\n Xi_subs = [] ; y_subs = []\n for phi in range(m):\n # matrix terms\n k_cov_phi = k_c[n*phi:n*(phi+1),n*phi:n*(phi+1)]\n k_phi = k_p[n*phi:n*(phi+1)]\n sig_phi2 = x@k_cov_phi@x.T\n eps_phi = k_phi@x-1.\n Xi_phi = (((k_phi*x).reshape(-1,1))@((k_phi*x).reshape(1,-1))\\\n + eps_phi * np.diag(k_phi*x))\\\n /sig_phi2\n # vector terms\n y_phi = -eps_phi*k_phi*x/sig_phi2\n Xi_subs += [Xi_phi]\n y_subs += [y_phi]\n # Add on zero terms to these\n Xi_subs += [np.zeros((m,m))]\n y_subs += [np.zeros(m)]\n Xi = block_diag(*Xi_subs)\n Xi += np.diag(np.diag(Xi)) ; Xi *= 0.5\n y = np.concatenate(tuple(y_subs))\n # Correction terms relating to physical sum. 
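\n # (Solute conservation: for each element i the predictions should\n # satisfy sum over phases of f_phi*k_i_phi = 1; delta_i below measures\n # the violation of this constraint and sig_i2 is its variance estimate.)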
\n Phi = np.zeros((m*(n+1),m*(n+1)))\n g = np.zeros(m*(n+1))\n for i in range(n):\n k_cov_i = k_c[i::n,i::n]\n k_i = k_p[i::n]\n sig_i2 = (np.diag(k_cov_i)*f_p**-2 + np.diag(f_c)*k_i**-2).sum()\n delta_i = (f_p*k_i).sum()-1.\n # matrix terms\n Phi11_i = (delta_i*np.diag(f_p*k_i)\\\n +((f_p*k_i).reshape(-1,1))*((f_p*k_i).reshape(1,-1)))\\\n /sig_i2\n Phi22_i = (-4*delta_i*np.diag((f_p-0.5)*f_p*k_i)\\\n +(((2.-2.*f_p)*f_p*k_i).reshape(-1,1))\\\n *(((2.-2.*f_p)*f_p*k_i).reshape(1,-1)))/sig_i2\n Phi12_i = (delta_i*np.diag((2.-2.*f_p)*f_p*k_i)\\\n +(((1.-f_p)*f_p*k_i).reshape(-1,1))\\\n *((f_p*k_i).reshape(1,-1))\\\n +((f_p*k_i).reshape(-1,1))\\\n *(((1.-f_p)*f_p*k_i).reshape(1,-1)))/sig_i2\n # vector terms\n g1_i = -delta_i*f_p*k_i/sig_i2\n g2_i = -delta_i*f_p*k_i*(2.-2.*f_p)/sig_i2 \n # Place these terms directly into the Phi matrix, g vector\n # vector\n g[i:n*m:n] = g1_i\n g[-m:] += g2_i\n # matrix\n Phi[i:n*m:n,i:n*m:n] = Phi11_i\n Phi[-m:,i:n*m:n] = Phi12_i\n Phi[i:n*m:n,-m:] = Phi12_i\n Phi[-m:,-m:] += Phi22_i\n Phi += np.diag(np.diag(Phi)) ; Phi *= 0.5\n # Correction terms relating to sum of phases\n Delta = f_p.sum()-1.\n sig_2 = f_c.sum()\n h_ = -2.*Delta*f_p*(2.-2.*f_p)/sig_2\n Theta_ = (4.*Delta*np.diag(f_p*(f_p-0.5))\\\n +(((2.-2.*f_p)*f_p).reshape(-1,1))\\\n *(((2.-2.*f_p)*f_p).reshape(1,-1)))/sig_2\n # Need to be bigger\n h = np.concatenate((np.zeros(n*m),h_))\n Theta = block_diag(np.zeros((n*m,n*m)),Theta_)\n Theta += np.diag(np.diag(Theta)) ; Theta *= 0.5\n # Can now calculate the corrections (transformed variables units)\n cov_new_inv = inv(cov)+Xi+Phi+Theta\n cov_new = inv(cov_new_inv)\n var_corr = cov_new@(y+g+h)\n # Transform to correct variables. \n p_corr = Logy.transform(k_p) + var_corr[:n*m]\n q_corr = ArcTanh.transform(f_p) + var_corr[-m:]\n k_out = Logy.inverse_transform(p_corr)\n f_out = ArcTanh.inverse_transform(q_corr)\n # Transform covariance \n tr_vec = np.concatenate((k_out,2*f_out*(1.-f_out)))\n cov_out = tr_vec.reshape(-1,1)@tr_vec.reshape(1,-1)*cov_new\n # Store\n k_prd_new.append(k_out)\n k_unc_new.append(np.diag(cov_out[:n*m]))\n f_prd_new.append(f_out)\n f_unc_new.append(np.diag(cov_out)[-m:])\n return k_prd_new,k_unc_new,f_prd_new,f_unc_new\n\ndef reshape_cov2sub(X_orig,cov,y=None):\n \"\"\"\n Reshape the covariance array for predictions into a list of sub-covariance\n matrices, each corresponding to the covariance of predictions for a single\n given entry. Note this returns a ragged list of matrices, i.e. matrices\n do NOT contain entries for elements not present in input. \n\n Parameters\n ----------\n X_orig (pd.Dataframe) : The original input data. Used to get zero entries. \n cov (ndarray) : Covariance matrix to reshape. Provide as tuples to get joined arrays as outputs.\n y (ndarray) : Optional. 
Provide predictions and reshape these in the same way too.\n \"\"\"\n tuple_flag = True if isinstance(cov,tuple) else False\n at_nums = HRrep.get_els(X_orig)\n m = len(at_nums)\n mask = ~(X_orig.loc[:,\"Composition\"].values.flatten()==0.)\n locs = mask.reshape(-1,m) # Locations of non-zero components\n sub_cov = [] # sub-covariance matrix list\n if y is not None:\n sub_y = []\n start_ind = 0\n for entry in locs:\n end_ind = start_ind + entry.sum()\n if tuple_flag:\n sub_covs = (cov_[start_ind:end_ind,start_ind:end_ind] for cov_ in cov)\n sub_cov += [block_diag(*sub_covs)]\n else:\n sub_cov += [cov[start_ind:end_ind,start_ind:end_ind]]\n if y is not None:\n if tuple_flag:\n sub_ys = tuple(y_[start_ind:end_ind] for y_ in y)\n sub_y += [np.concatenate(sub_ys)]\n else:\n sub_y += [y[start_ind:end_ind]]\n start_ind = copy(end_ind)\n if y is not None:\n return sub_cov,sub_y\n else: \n return sub_cov", "sub_path": "partitioning/ms_toolkit.py", "file_name": "ms_toolkit.py", "file_ext": "py", "file_size_in_byte": 107095, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "warnings.simplefilter", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.errors", "line_number": 48, "usage_type": "attribute"}, {"api_name": "warnings.simplefilter", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 182, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.GaussianProcessRegressor", "line_number": 191, "usage_type": "name"}, {"api_name": "scipy.optimize.minimize", "line_number": 216, "usage_type": "call"}, {"api_name": "sklearn.utils.optimize._check_optimize_result", "line_number": 224, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.kernels", "line_number": 236, "usage_type": "attribute"}, {"api_name": "sklearn.gaussian_process", "line_number": 236, "usage_type": "name"}, {"api_name": "numpy.eye", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 254, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.inner", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.inner", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 304, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 304, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.kernels", 
"line_number": 313, "usage_type": "attribute"}, {"api_name": "sklearn.gaussian_process", "line_number": 313, "usage_type": "name"}, {"api_name": "numpy.eye", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 332, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 340, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.kernels._check_length_scale", "line_number": 342, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.kernels", "line_number": 342, "usage_type": "attribute"}, {"api_name": "sklearn.gaussian_process", "line_number": 342, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.pdist", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 345, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.squareform", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 348, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 361, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.squareform", "line_number": 364, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 364, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 368, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 370, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 388, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 392, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 392, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 392, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 392, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 430, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 440, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 442, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 443, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 515, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 517, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 522, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 547, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 547, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 548, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 548, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 549, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 549, "usage_type": "name"}, {"api_name": "numpy.median", "line_number": 551, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 554, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 582, 
"usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 582, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 582, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 582, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 584, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 571, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 602, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 602, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 602, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 602, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 604, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 615, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 615, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 615, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 615, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 617, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 628, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 628, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 628, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 628, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 630, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 593, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 664, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 665, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 688, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 688, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 689, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 689, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 690, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 690, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 691, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 691, "usage_type": "call"}, {"api_name": "ase.data.atomic_numbers.get", "line_number": 729, "usage_type": "call"}, {"api_name": "ase.data.atomic_numbers", "line_number": 729, "usage_type": "attribute"}, {"api_name": "ase.data", "line_number": 729, "usage_type": "name"}, {"api_name": "numpy.repeat", "line_number": 762, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 765, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 768, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 778, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 778, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 778, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 783, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 787, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 787, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 787, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 789, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 789, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 789, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 800, "usage_type": 
"call"}, {"api_name": "numpy.where", "line_number": 801, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 838, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 844, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 847, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 875, "usage_type": "call"}, {"api_name": "numpy.ma.log", "line_number": 896, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 896, "usage_type": "attribute"}, {"api_name": "numpy.repeat", "line_number": 911, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 913, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 929, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 943, "usage_type": "call"}, {"api_name": "numpy.ma.log", "line_number": 953, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 953, "usage_type": "attribute"}, {"api_name": "numpy.repeat", "line_number": 967, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 969, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 995, "usage_type": "call"}, {"api_name": "numpy.errstate", "line_number": 1017, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 1019, "usage_type": "attribute"}, {"api_name": "numpy.nan_to_num", "line_number": 1020, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1028, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 1031, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 1034, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1044, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 1047, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 1050, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1061, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 1064, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 1066, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1073, "usage_type": "call"}, {"api_name": "numpy.diag_indices", "line_number": 1074, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1076, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1090, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1101, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1111, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1124, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1137, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1149, "usage_type": "call"}, {"api_name": "numpy.errstate", "line_number": 1163, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 1165, "usage_type": "attribute"}, {"api_name": "numpy.nan_to_num", "line_number": 1166, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1168, "usage_type": "call"}, {"api_name": "numpy.errstate", "line_number": 1182, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 1184, "usage_type": "attribute"}, {"api_name": "numpy.nan_to_num", "line_number": 1185, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1186, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 1197, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 1197, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 1211, "usage_type": "call"}, 
{"api_name": "numpy.array", "line_number": 1212, "usage_type": "call"}, {"api_name": "ase.data.chemical_symbols", "line_number": 1212, "usage_type": "attribute"}, {"api_name": "ase.data", "line_number": 1212, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 1233, "usage_type": "call"}, {"api_name": "ase.data.chemical_symbols", "line_number": 1234, "usage_type": "attribute"}, {"api_name": "ase.data", "line_number": 1234, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 1235, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 1261, "usage_type": "attribute"}, {"api_name": "numpy.c_", "line_number": 1264, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 1276, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1277, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 1290, "usage_type": "attribute"}, {"api_name": "numpy.c_", "line_number": 1307, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 1312, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 1312, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 1329, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 1330, "usage_type": "call"}, {"api_name": "ase.build.bulk", "line_number": 1335, "usage_type": "call"}, {"api_name": "shells.shell_radii", "line_number": 1336, "usage_type": "call"}, {"api_name": "ase.build.bulk", "line_number": 1337, "usage_type": "call"}, {"api_name": "shells.shell_radii", "line_number": 1338, "usage_type": "call"}, {"api_name": "ase.build.bulk", "line_number": 1339, "usage_type": "call"}, {"api_name": "shells.shell_radii", "line_number": 1340, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1359, "usage_type": "call"}, {"api_name": "ase.data.atomic_numbers.get", "line_number": 1359, "usage_type": "call"}, {"api_name": "ase.data.atomic_numbers", "line_number": 1359, "usage_type": "attribute"}, {"api_name": "ase.data", "line_number": 1359, "usage_type": "name"}, {"api_name": "numpy.atleast_2d", "line_number": 1360, "usage_type": "call"}, {"api_name": "ase.build.bulk", "line_number": 1365, "usage_type": "call"}, {"api_name": "shells.shell_radii", "line_number": 1366, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1370, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1372, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1377, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1380, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 1382, "usage_type": "call"}, {"api_name": "ase.build.bulk", "line_number": 1387, "usage_type": "call"}, {"api_name": "shells.shell_radii", "line_number": 1388, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 1392, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 1395, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 1396, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 1397, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 1397, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 1398, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 1398, "usage_type": "call"}, {"api_name": "numpy.atleast_3d", "line_number": 1398, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 1400, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 1415, "usage_type": "call"}, 
{"api_name": "copy.copy", "line_number": 1474, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1485, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1488, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1489, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1501, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1503, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1513, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 1515, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1547, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 1549, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 1558, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1560, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 1563, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 1566, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 1568, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 1588, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 1592, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 1597, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 1602, "usage_type": "call"}, {"api_name": "numpy.arctanh", "line_number": 1619, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 1623, "usage_type": "call"}, {"api_name": "numpy.cosh", "line_number": 1628, "usage_type": "call"}, {"api_name": "numpy.cosh", "line_number": 1633, "usage_type": "call"}, {"api_name": "numpy.random.default_rng", "line_number": 1671, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1671, "usage_type": "attribute"}, {"api_name": "numpy.random.default_rng", "line_number": 1676, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1676, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 1679, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1683, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 1692, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 1693, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 1695, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1696, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 1696, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1733, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1734, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1743, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1744, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1745, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1749, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 1780, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 1780, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1795, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 1795, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1800, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 1806, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 1835, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 1835, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 
1846, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1847, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 1854, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 1856, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 1864, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 1906, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 1923, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 1923, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 1932, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 1937, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1945, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 1972, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 1972, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 1977, "usage_type": "call"}, {"api_name": "numpy.random.default_rng", "line_number": 1994, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1994, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 1998, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 1999, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 2000, "usage_type": "call"}, {"api_name": "gpytorch.mlls.ExactMarginalLogLikelihood", "line_number": 2021, "usage_type": "call"}, {"api_name": "gpytorch.mlls", "line_number": 2021, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 2026, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 2026, "usage_type": "attribute"}, {"api_name": "torch.optim.LBFGS", "line_number": 2029, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 2029, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 2056, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2060, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 2084, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 2088, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 2091, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 2102, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 2107, "usage_type": "call"}, {"api_name": "gpytorch.settings.fast_pred_var", "line_number": 2107, "usage_type": "call"}, {"api_name": "gpytorch.settings", "line_number": 2107, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 2114, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2114, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 2120, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 2126, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 2140, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 2153, "usage_type": "call"}, {"api_name": "numpy.ma.masked_where", "line_number": 2183, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 2183, "usage_type": "attribute"}, {"api_name": "numpy.ma.masked_where", "line_number": 2184, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 2184, "usage_type": "attribute"}, {"api_name": "numpy.ma.masked_where", "line_number": 2186, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 2186, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 2212, 
"usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 2213, "usage_type": "attribute"}, {"api_name": "numpy.nansum", "line_number": 2214, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 2215, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 2217, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 2223, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 2223, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 2240, "usage_type": "call"}, {"api_name": "ase.data.atomic_numbers.get", "line_number": 2241, "usage_type": "call"}, {"api_name": "ase.data.atomic_numbers", "line_number": 2241, "usage_type": "attribute"}, {"api_name": "ase.data", "line_number": 2241, "usage_type": "name"}, {"api_name": "ase.build.bulk", "line_number": 2243, "usage_type": "call"}, {"api_name": "shells.shell_radii", "line_number": 2244, "usage_type": "call"}, {"api_name": "ase.build.bulk", "line_number": 2245, "usage_type": "call"}, {"api_name": "shells.shell_radii", "line_number": 2246, "usage_type": "call"}, {"api_name": "ase.build.bulk", "line_number": 2247, "usage_type": "call"}, {"api_name": "shells.shell_radii", "line_number": 2248, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 2257, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 2258, "usage_type": "attribute"}, {"api_name": "ase.build.bulk", "line_number": 2281, "usage_type": "call"}, {"api_name": "shells.shell_radii", "line_number": 2282, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 2285, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 2293, "usage_type": "call"}, {"api_name": "ase.build.bulk", "line_number": 2298, "usage_type": "call"}, {"api_name": "shells.shell_radii", "line_number": 2299, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 2307, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 2308, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 2309, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 2310, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 2310, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 2311, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 2312, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2330, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2331, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2340, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 2341, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2343, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2344, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2349, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2352, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2355, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 2358, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2359, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 2364, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 2367, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2377, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 2378, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 2378, "usage_type": "call"}, {"api_name": 
"numpy.zeros", "line_number": 2378, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 2381, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 2382, "usage_type": "call"}, {"api_name": "dill.load", "line_number": 2389, "usage_type": "call"}, {"api_name": "dill.load", "line_number": 2392, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 2418, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 2418, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 2425, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 2430, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 2431, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2440, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2444, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2446, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2448, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2450, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2451, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2452, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2453, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2454, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2455, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 2483, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 2483, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 2490, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 2496, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 2496, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2496, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 2498, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 2499, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 2505, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 2508, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2508, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 2509, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2514, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2524, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2525, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2526, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2527, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2528, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2529, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 2557, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2568, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2575, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2576, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 2577, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2578, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2579, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2581, "usage_type": 
"call"}, {"api_name": "numpy.zeros", "line_number": 2582, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2586, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2589, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2592, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2595, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2612, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2617, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2621, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2621, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 2622, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2622, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2623, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 2625, "usage_type": "call"}, {"api_name": "scipy.linalg.inv", "line_number": 2626, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2634, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2638, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 2640, "usage_type": "call"}, {"api_name": "scipy.linalg.block_diag", "line_number": 2669, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2675, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 2678, "usage_type": "call"}]} +{"seq_id": "519528114", "text": "from unittest import mock\n\nfrom tests.factories import AttrFactory\nfrom tests.factories import AttrTypeFactory\nfrom tests.factories import ClassFactory\nfrom tests.factories import ExtensionFactory\nfrom tests.factories import FactoryTestCase\nfrom xsdata.codegen.container import ClassContainer\nfrom xsdata.codegen.models import Class\nfrom xsdata.codegen.models import Restrictions\nfrom xsdata.codegen.sanitizer import ClassSanitizer\nfrom xsdata.models.config import GeneratorConfig\nfrom xsdata.models.enums import Namespace\nfrom xsdata.models.enums import Tag\nfrom xsdata.models.xsd import ComplexType\nfrom xsdata.models.xsd import Element\n\n\nclass ClassSanitizerTest(FactoryTestCase):\n def setUp(self):\n super().setUp()\n\n self.config = GeneratorConfig()\n self.container = ClassContainer()\n self.sanitizer = ClassSanitizer(container=self.container, config=self.config)\n\n @mock.patch.object(ClassSanitizer, \"resolve_conflicts\")\n @mock.patch.object(ClassSanitizer, \"process_class\")\n def test_process(self, mock_process_class, mock_resolve_conflicts):\n classes = ClassFactory.list(2)\n\n self.sanitizer.container.extend(classes)\n ClassSanitizer.process(self.container, self.config)\n\n mock_process_class.assert_has_calls(list(map(mock.call, classes)))\n mock_resolve_conflicts.assert_called_once_with()\n\n @mock.patch.object(ClassSanitizer, \"process_duplicate_attribute_names\")\n @mock.patch.object(ClassSanitizer, \"process_attribute_sequence\")\n @mock.patch.object(ClassSanitizer, \"process_attribute_restrictions\")\n @mock.patch.object(ClassSanitizer, \"process_attribute_default\")\n def test_process_class(\n self,\n mock_process_attribute_default,\n mock_process_attribute_restrictions,\n mock_process_attribute_sequence,\n mock_process_duplicate_attribute_names,\n ):\n target = ClassFactory.elements(2)\n inner = ClassFactory.elements(1)\n target.inner.append(inner)\n\n self.sanitizer.process_class(target)\n\n calls_with_target = [\n mock.call(target.inner[0], 
target.inner[0].attrs[0]),\n mock.call(target, target.attrs[0]),\n mock.call(target, target.attrs[1]),\n ]\n\n calls_without_target = [\n mock.call(target.inner[0].attrs[0]),\n mock.call(target.attrs[0]),\n mock.call(target.attrs[1]),\n ]\n\n mock_process_attribute_default.assert_has_calls(calls_with_target)\n mock_process_attribute_restrictions.assert_has_calls(calls_without_target)\n mock_process_attribute_sequence.assert_has_calls(calls_with_target)\n mock_process_duplicate_attribute_names.assert_has_calls(\n [mock.call(target.inner[0].attrs), mock.call(target.attrs)]\n )\n\n @mock.patch.object(ClassSanitizer, \"group_compound_fields\")\n def test_process_class_group_compound_fields(self, mock_group_compound_fields):\n target = ClassFactory.create()\n inner = ClassFactory.create()\n target.inner.append(inner)\n\n self.config.output.compound_fields = True\n self.sanitizer.process_class(target)\n\n mock_group_compound_fields.assert_has_calls(\n [\n mock.call(inner),\n mock.call(target),\n ]\n )\n\n def test_process_attribute_default_with_enumeration(self):\n target = ClassFactory.create()\n attr = AttrFactory.enumeration()\n attr.restrictions.max_occurs = 2\n attr.fixed = True\n\n self.sanitizer.process_attribute_default(target, attr)\n self.assertTrue(attr.fixed)\n\n def test_process_attribute_default_with_list_field(self):\n target = ClassFactory.create()\n attr = AttrFactory.create(fixed=True)\n attr.restrictions.max_occurs = 2\n self.sanitizer.process_attribute_default(target, attr)\n self.assertFalse(attr.fixed)\n\n def test_process_attribute_default_with_optional_field(self):\n target = ClassFactory.create()\n attr = AttrFactory.create(fixed=True, default=2)\n attr.restrictions.min_occurs = 0\n self.sanitizer.process_attribute_default(target, attr)\n self.assertFalse(attr.fixed)\n self.assertIsNone(attr.default)\n\n def test_process_attribute_default_with_xsi_type(self):\n target = ClassFactory.create()\n attr = AttrFactory.create(\n fixed=True, default=2, name=\"type\", namespace=Namespace.XSI.uri\n )\n self.sanitizer.process_attribute_default(target, attr)\n self.assertFalse(attr.fixed)\n self.assertIsNone(attr.default)\n\n def test_process_attribute_default_with_valid_case(self):\n target = ClassFactory.create()\n attr = AttrFactory.create(fixed=True, default=2)\n self.sanitizer.process_attribute_default(target, attr)\n self.assertTrue(attr.fixed)\n self.assertEqual(2, attr.default)\n\n @mock.patch(\"xsdata.codegen.sanitizer.logger.warning\")\n @mock.patch.object(ClassSanitizer, \"promote_inner_class\")\n @mock.patch.object(ClassSanitizer, \"find_enum\")\n def test_process_attribute_default_enum(\n self, mock_find_enum, mock_promote_inner_class, mock_logger_warning\n ):\n enum_one = ClassFactory.enumeration(1, qname=\"root\")\n enum_one.attrs[0].default = \"1\"\n enum_one.attrs[0].name = \"one\"\n enum_two = ClassFactory.enumeration(1, qname=\"inner\")\n enum_two.attrs[0].default = \"2\"\n enum_two.attrs[0].name = \"two\"\n enum_three = ClassFactory.enumeration(1, qname=\"missing_member\")\n\n mock_find_enum.side_effect = [\n None,\n enum_one,\n None,\n enum_two,\n enum_three,\n ]\n\n target = ClassFactory.create(\n qname=\"target\",\n attrs=[\n AttrFactory.create(\n types=[\n AttrTypeFactory.create(),\n AttrTypeFactory.create(qname=\"foo\"),\n ],\n default=\"1\",\n ),\n AttrFactory.create(\n types=[\n AttrTypeFactory.create(),\n AttrTypeFactory.create(qname=\"bar\", forward=True),\n ],\n default=\"2\",\n ),\n AttrFactory.create(default=\"3\"),\n ],\n )\n\n actual = []\n for attr 
in target.attrs:\n self.sanitizer.process_attribute_default(target, attr)\n actual.append(attr.default)\n\n self.assertEqual([\"@enum@root::one\", \"@enum@inner::two\", None], actual)\n mock_promote_inner_class.assert_called_once_with(target, enum_two)\n mock_logger_warning.assert_called_once_with(\n \"No enumeration member matched %s.%s default value `%s`\",\n target.name,\n target.attrs[2].local_name,\n \"3\",\n )\n\n def test_promote_inner_class(self):\n target = ClassFactory.elements(2, qname=\"parent\")\n inner = ClassFactory.create(qname=\"{foo}bar\")\n\n target.inner.append(inner)\n target.attrs[1].types.append(AttrTypeFactory.create(forward=True, qname=\"bar\"))\n\n clone_target = target.clone()\n\n self.container.add(target)\n self.sanitizer.promote_inner_class(target, inner)\n\n self.assertNotIn(inner, target.inner)\n\n self.assertEqual(\"{foo}parent_bar\", target.attrs[1].types[1].qname)\n self.assertFalse(target.attrs[1].types[1].forward)\n self.assertEqual(\"{foo}parent_bar\", inner.qname)\n self.assertEqual(2, len(self.container.data))\n self.assertIn(inner, self.container[\"{foo}parent_bar\"])\n\n self.assertEqual(clone_target.attrs[0], target.attrs[0])\n self.assertEqual(clone_target.attrs[1].types[0], target.attrs[1].types[0])\n\n def test_find_enum(self):\n native_type = AttrTypeFactory.create()\n matching_external = AttrTypeFactory.create(\"foo\")\n missing_external = AttrTypeFactory.create(\"bar\")\n matching_inner = AttrTypeFactory.create(\"foobar\", forward=True)\n missing_inner = AttrTypeFactory.create(\"barfoo\", forward=True)\n enumeration = ClassFactory.enumeration(1, qname=\"foo\")\n inner = ClassFactory.enumeration(1, qname=\"foobar\")\n\n target = ClassFactory.create(\n attrs=[\n AttrFactory.create(\n types=[\n native_type,\n matching_external,\n missing_external,\n matching_inner,\n missing_inner,\n ]\n )\n ],\n inner=[inner],\n )\n self.sanitizer.container.extend([target, enumeration])\n\n actual = self.sanitizer.find_enum(target, native_type)\n self.assertIsNone(actual)\n\n actual = self.sanitizer.find_enum(target, matching_external)\n self.assertEqual(enumeration, actual)\n\n actual = self.sanitizer.find_enum(target, missing_external)\n self.assertIsNone(actual)\n\n actual = self.sanitizer.find_enum(target, matching_inner)\n self.assertEqual(inner, actual)\n\n actual = self.sanitizer.find_enum(target, missing_inner)\n self.assertIsNone(actual)\n\n def test_process_attribute_restrictions(self):\n restrictions = [\n Restrictions(min_occurs=0, max_occurs=0, required=True),\n Restrictions(min_occurs=0, max_occurs=1, required=True),\n Restrictions(min_occurs=1, max_occurs=1, required=False),\n Restrictions(max_occurs=2, required=True),\n Restrictions(min_occurs=2, max_occurs=2, required=True),\n ]\n expected = [\n {},\n {},\n {\"required\": True},\n {\"max_occurs\": 2},\n {\"max_occurs\": 2, \"min_occurs\": 2},\n ]\n\n for idx, res in enumerate(restrictions):\n attr = AttrFactory.create(restrictions=res)\n self.sanitizer.process_attribute_restrictions(attr)\n self.assertEqual(expected[idx], res.asdict())\n\n def test_sanitize_duplicate_attribute_names(self):\n attrs = [\n AttrFactory.create(name=\"a\", tag=Tag.ELEMENT),\n AttrFactory.create(name=\"a\", tag=Tag.ATTRIBUTE),\n AttrFactory.create(name=\"b\", tag=Tag.ATTRIBUTE),\n AttrFactory.create(name=\"c\", tag=Tag.ATTRIBUTE),\n AttrFactory.create(name=\"c\", tag=Tag.ELEMENT),\n AttrFactory.create(name=\"d\", tag=Tag.ELEMENT),\n AttrFactory.create(name=\"d\", tag=Tag.ELEMENT),\n AttrFactory.create(name=\"e\", 
tag=Tag.ELEMENT, namespace=\"b\"),\n AttrFactory.create(name=\"e\", tag=Tag.ELEMENT),\n AttrFactory.create(name=\"f\", tag=Tag.ELEMENT),\n AttrFactory.create(name=\"f\", tag=Tag.ELEMENT, namespace=\"a\"),\n AttrFactory.create(name=\"gA\", tag=Tag.ENUMERATION),\n AttrFactory.create(name=\"g[A]\", tag=Tag.ENUMERATION),\n AttrFactory.create(name=\"g_a\", tag=Tag.ENUMERATION),\n AttrFactory.create(name=\"g_a_1\", tag=Tag.ENUMERATION),\n ]\n\n self.sanitizer.process_duplicate_attribute_names(attrs)\n expected = [\n \"a\",\n \"a_Attribute\",\n \"b\",\n \"c_Attribute\",\n \"c\",\n \"d_Element\",\n \"d\",\n \"b_e\",\n \"e\",\n \"f\",\n \"a_f\",\n \"gA\",\n \"g[A]_2\",\n \"g_a_3\",\n \"g_a_1\",\n ]\n self.assertEqual(expected, [x.name for x in attrs])\n\n def test_sanitize_attribute_sequence(self):\n def len_sequential(target: Class):\n return len([attr for attr in target.attrs if attr.restrictions.sequential])\n\n restrictions = Restrictions(max_occurs=2, sequential=True)\n target = ClassFactory.create(\n attrs=[\n AttrFactory.create(restrictions=restrictions.clone()),\n AttrFactory.create(restrictions=restrictions.clone()),\n ]\n )\n\n attrs_clone = [attr.clone() for attr in target.attrs]\n\n self.sanitizer.process_attribute_sequence(target, target.attrs[0])\n self.assertEqual(2, len_sequential(target))\n\n target.attrs[0].restrictions.sequential = False\n self.sanitizer.process_attribute_sequence(target, target.attrs[0])\n self.assertEqual(1, len_sequential(target))\n\n self.sanitizer.process_attribute_sequence(target, target.attrs[1])\n self.assertEqual(0, len_sequential(target))\n\n target.attrs = attrs_clone\n target.attrs[1].restrictions.sequential = False\n self.sanitizer.process_attribute_sequence(target, target.attrs[0])\n self.assertEqual(0, len_sequential(target))\n\n target.attrs[0].restrictions.sequential = True\n target.attrs[0].restrictions.max_occurs = 0\n target.attrs[1].restrictions.sequential = True\n self.sanitizer.process_attribute_sequence(target, target.attrs[0])\n self.assertEqual(1, len_sequential(target))\n\n @mock.patch.object(ClassSanitizer, \"rename_classes\")\n def test_resolve_conflicts(self, mock_rename_classes):\n classes = [\n ClassFactory.create(qname=\"{foo}A\"),\n ClassFactory.create(qname=\"{foo}a\"),\n ClassFactory.create(qname=\"a\"),\n ClassFactory.create(qname=\"b\"),\n ClassFactory.create(qname=\"b\"),\n ]\n self.sanitizer.container.extend(classes)\n self.sanitizer.resolve_conflicts()\n\n mock_rename_classes.assert_has_calls(\n [\n mock.call(classes[:2]),\n mock.call(classes[3:]),\n ]\n )\n\n @mock.patch.object(ClassSanitizer, \"rename_class\")\n def test_rename_classes(self, mock_rename_class):\n classes = [\n ClassFactory.create(qname=\"a\", type=Element),\n ClassFactory.create(qname=\"A\", type=Element),\n ClassFactory.create(qname=\"a\", type=ComplexType),\n ]\n self.sanitizer.rename_classes(classes)\n\n mock_rename_class.assert_has_calls(\n [\n mock.call(classes[0]),\n mock.call(classes[1]),\n mock.call(classes[2]),\n ]\n )\n\n @mock.patch.object(ClassSanitizer, \"rename_class\")\n def test_rename_classes_protects_single_element(self, mock_rename_class):\n classes = [\n ClassFactory.create(qname=\"a\", type=Element),\n ClassFactory.create(qname=\"a\", type=ComplexType),\n ]\n self.sanitizer.rename_classes(classes)\n\n mock_rename_class.assert_called_once_with(classes[1])\n\n @mock.patch.object(ClassSanitizer, \"rename_dependency\")\n def test_rename_class(self, mock_rename_dependency):\n target = ClassFactory.create(qname=\"{foo}a\")\n 
self.sanitizer.container.add(target)\n self.sanitizer.container.add(ClassFactory.create())\n self.sanitizer.container.add(ClassFactory.create(qname=\"{foo}a_1\"))\n self.sanitizer.container.add(ClassFactory.create(qname=\"{foo}A_2\"))\n self.sanitizer.rename_class(target)\n\n self.assertEqual(\"{foo}a_3\", target.qname)\n self.assertEqual(\"a\", target.meta_name)\n\n mock_rename_dependency.assert_has_calls(\n mock.call(item, \"{foo}a\", \"{foo}a_3\")\n for item in self.sanitizer.container.iterate()\n )\n\n self.assertEqual([target], self.container.data[\"{foo}a_3\"])\n self.assertEqual([], self.container.data[\"{foo}a\"])\n\n def test_rename_dependency(self):\n attr_type = AttrTypeFactory.create(\"{foo}bar\")\n\n target = ClassFactory.create(\n extensions=[\n ExtensionFactory.create(),\n ExtensionFactory.create(type=attr_type.clone()),\n ],\n attrs=[\n AttrFactory.create(),\n AttrFactory.create(types=[AttrTypeFactory.create(), attr_type.clone()]),\n ],\n inner=[\n ClassFactory.create(\n extensions=[ExtensionFactory.create(type=attr_type.clone())],\n attrs=[\n AttrFactory.create(),\n AttrFactory.create(\n types=[AttrTypeFactory.create(), attr_type.clone()]\n ),\n ],\n )\n ],\n )\n\n self.sanitizer.rename_dependency(target, \"{foo}bar\", \"thug\")\n dependencies = set(target.dependencies())\n self.assertNotIn(\"{foo}bar\", dependencies)\n self.assertIn(\"thug\", dependencies)\n\n @mock.patch.object(ClassSanitizer, \"group_fields\")\n def test_group_compound_fields(self, mock_group_fields):\n target = ClassFactory.elements(8)\n # First group repeating\n target.attrs[0].restrictions.choice = \"1\"\n target.attrs[1].restrictions.choice = \"1\"\n target.attrs[1].restrictions.max_occurs = 2\n # Second group repeating\n target.attrs[2].restrictions.choice = \"2\"\n target.attrs[3].restrictions.choice = \"2\"\n target.attrs[3].restrictions.max_occurs = 2\n # Third group optional\n target.attrs[4].restrictions.choice = \"3\"\n target.attrs[5].restrictions.choice = \"3\"\n\n self.sanitizer.group_compound_fields(target)\n mock_group_fields.assert_has_calls(\n [\n mock.call(target, target.attrs[0:2]),\n mock.call(target, target.attrs[2:4]),\n ]\n )\n\n def test_group_fields(self):\n target = ClassFactory.create(attrs=AttrFactory.list(2))\n target.attrs[0].restrictions.min_occurs = 10\n target.attrs[0].restrictions.max_occurs = 15\n target.attrs[1].restrictions.min_occurs = 5\n target.attrs[1].restrictions.max_occurs = 20\n\n expected = AttrFactory.create(\n name=\"attr_B_Or_attr_C\",\n tag=\"Choice\",\n index=0,\n types=[AttrTypeFactory.xs_any()],\n choices=[\n AttrFactory.create(\n tag=target.attrs[0].tag,\n name=\"attr_B\",\n types=target.attrs[0].types,\n ),\n AttrFactory.create(\n tag=target.attrs[1].tag,\n name=\"attr_C\",\n types=target.attrs[1].types,\n ),\n ],\n )\n expected_res = Restrictions(min_occurs=5, max_occurs=20)\n\n self.sanitizer.group_fields(target, list(target.attrs))\n self.assertEqual(1, len(target.attrs))\n self.assertEqual(expected, target.attrs[0])\n self.assertEqual(expected_res, target.attrs[0].restrictions)\n\n def test_group_fields_limit_name(self):\n target = ClassFactory.create(attrs=AttrFactory.list(3))\n self.sanitizer.group_fields(target, list(target.attrs))\n\n self.assertEqual(1, len(target.attrs))\n self.assertEqual(\"attr_B_Or_attr_C_Or_attr_D\", target.attrs[0].name)\n\n target = ClassFactory.create(attrs=AttrFactory.list(4))\n self.sanitizer.group_fields(target, list(target.attrs))\n self.assertEqual(\"choice\", target.attrs[0].name)\n\n def 
test_build_attr_choice(self):\n attr = AttrFactory.create(\n name=\"a\", namespace=\"xsdata\", default=\"123\", help=\"help\", fixed=True\n )\n attr.local_name = \"aaa\"\n attr.restrictions = Restrictions(\n required=True,\n prohibited=None,\n min_occurs=1,\n max_occurs=1,\n min_exclusive=\"1.1\",\n min_inclusive=\"1\",\n min_length=1,\n max_exclusive=\"1\",\n max_inclusive=\"1.1\",\n max_length=10,\n total_digits=333,\n fraction_digits=2,\n length=5,\n white_space=\"collapse\",\n pattern=r\"[A-Z]\",\n explicit_timezone=\"+1\",\n nillable=True,\n choice=\"abc\",\n sequential=True,\n )\n expected_res = attr.restrictions.clone()\n expected_res.min_occurs = None\n expected_res.max_occurs = None\n expected_res.sequential = None\n\n actual = self.sanitizer.build_attr_choice(attr)\n\n self.assertEqual(attr.local_name, actual.name)\n self.assertEqual(attr.namespace, actual.namespace)\n self.assertEqual(attr.default, actual.default)\n self.assertEqual(attr.tag, actual.tag)\n self.assertEqual(attr.types, actual.types)\n self.assertEqual(expected_res, actual.restrictions)\n self.assertEqual(attr.help, actual.help)\n self.assertFalse(actual.fixed)\n", "sub_path": "tests/codegen/test_sanitizer.py", "file_name": "test_sanitizer.py", "file_ext": "py", "file_size_in_byte": 20688, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "tests.factories.FactoryTestCase", "line_number": 19, "usage_type": "name"}, {"api_name": "xsdata.models.config.GeneratorConfig", "line_number": 23, "usage_type": "call"}, {"api_name": "xsdata.codegen.container.ClassContainer", "line_number": 24, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 25, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory.list", "line_number": 30, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 30, "usage_type": "name"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer.process", "line_number": 33, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 33, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 35, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 35, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 27, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 27, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 27, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 27, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 28, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 28, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 28, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 28, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.elements", "line_number": 49, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 49, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.elements", "line_number": 50, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 50, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 56, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 56, "usage_type": "name"}, {"api_name": "unittest.mock.call", 
"line_number": 57, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 57, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 58, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 58, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 62, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 62, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 63, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 63, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 64, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 64, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 71, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 71, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 38, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 38, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 38, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 38, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 39, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 39, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 39, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 39, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 40, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 40, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 40, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 40, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 41, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 41, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 41, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 41, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 76, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 76, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 77, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 77, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 85, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 85, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 86, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 86, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 74, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 74, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 74, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 74, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 91, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 91, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.enumeration", "line_number": 92, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", 
"line_number": 92, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 100, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 100, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 101, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 101, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 107, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 107, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 108, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 108, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 115, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 115, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 116, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 116, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Namespace.XSI", "line_number": 117, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Namespace", "line_number": 117, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 124, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 124, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 125, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 125, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.enumeration", "line_number": 136, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 136, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.enumeration", "line_number": 139, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 139, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.enumeration", "line_number": 142, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 142, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 152, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 152, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 155, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 155, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 157, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 157, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 158, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 158, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 162, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 162, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 164, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 164, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 165, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 165, "usage_type": "name"}, 
{"api_name": "tests.factories.AttrFactory.create", "line_number": 169, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 169, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 130, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 130, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 131, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 131, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 131, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 131, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 132, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 132, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 132, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 132, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.elements", "line_number": 188, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 188, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 189, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 189, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 192, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 192, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 211, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 211, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 212, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 212, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 213, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 213, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 214, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 214, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 215, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 215, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.enumeration", "line_number": 216, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 216, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.enumeration", "line_number": 217, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 217, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 219, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 219, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 221, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 221, "usage_type": "name"}, {"api_name": "xsdata.codegen.models.Restrictions", "line_number": 252, "usage_type": "call"}, {"api_name": "xsdata.codegen.models.Restrictions", "line_number": 253, "usage_type": "call"}, {"api_name": "xsdata.codegen.models.Restrictions", "line_number": 254, "usage_type": "call"}, 
{"api_name": "xsdata.codegen.models.Restrictions", "line_number": 255, "usage_type": "call"}, {"api_name": "xsdata.codegen.models.Restrictions", "line_number": 256, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 267, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 267, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 273, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 273, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ELEMENT", "line_number": 273, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 273, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 274, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 274, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ATTRIBUTE", "line_number": 274, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 274, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 275, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 275, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ATTRIBUTE", "line_number": 275, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 275, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 276, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 276, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ATTRIBUTE", "line_number": 276, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 276, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 277, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 277, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ELEMENT", "line_number": 277, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 277, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 278, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 278, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ELEMENT", "line_number": 278, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 278, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 279, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 279, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ELEMENT", "line_number": 279, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 279, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 280, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 280, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ELEMENT", "line_number": 280, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 280, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 281, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 281, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ELEMENT", "line_number": 281, "usage_type": "attribute"}, {"api_name": 
"xsdata.models.enums.Tag", "line_number": 281, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 282, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 282, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ELEMENT", "line_number": 282, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 282, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 283, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 283, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ELEMENT", "line_number": 283, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 283, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 284, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 284, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ENUMERATION", "line_number": 284, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 284, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 285, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 285, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ENUMERATION", "line_number": 285, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 285, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 286, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 286, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ENUMERATION", "line_number": 286, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 286, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 287, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 287, "usage_type": "name"}, {"api_name": "xsdata.models.enums.Tag.ENUMERATION", "line_number": 287, "usage_type": "attribute"}, {"api_name": "xsdata.models.enums.Tag", "line_number": 287, "usage_type": "name"}, {"api_name": "xsdata.codegen.models.Class", "line_number": 311, "usage_type": "name"}, {"api_name": "xsdata.codegen.models.Restrictions", "line_number": 314, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 315, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 315, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 317, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 317, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 318, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 318, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 348, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 348, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 349, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 349, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 350, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 350, "usage_type": "name"}, {"api_name": 
"tests.factories.ClassFactory.create", "line_number": 351, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 351, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 352, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 352, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 359, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 359, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 360, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 360, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 345, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 345, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 345, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 345, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 367, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 367, "usage_type": "name"}, {"api_name": "xsdata.models.xsd.Element", "line_number": 367, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 368, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 368, "usage_type": "name"}, {"api_name": "xsdata.models.xsd.Element", "line_number": 368, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 369, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 369, "usage_type": "name"}, {"api_name": "xsdata.models.xsd.ComplexType", "line_number": 369, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 375, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 375, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 376, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 376, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 377, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 377, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 364, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 364, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 364, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 364, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 384, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 384, "usage_type": "name"}, {"api_name": "xsdata.models.xsd.Element", "line_number": 384, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 385, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 385, "usage_type": "name"}, {"api_name": "xsdata.models.xsd.ComplexType", "line_number": 385, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 381, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 381, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 381, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 381, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", 
"line_number": 393, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 393, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 395, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 395, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 396, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 396, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 397, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 397, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 404, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 404, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 391, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 391, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 391, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 391, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 412, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 412, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 414, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 414, "usage_type": "name"}, {"api_name": "tests.factories.ExtensionFactory.create", "line_number": 416, "usage_type": "call"}, {"api_name": "tests.factories.ExtensionFactory", "line_number": 416, "usage_type": "name"}, {"api_name": "tests.factories.ExtensionFactory.create", "line_number": 417, "usage_type": "call"}, {"api_name": "tests.factories.ExtensionFactory", "line_number": 417, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 420, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 420, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 421, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 421, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 421, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 421, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 424, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 424, "usage_type": "name"}, {"api_name": "tests.factories.ExtensionFactory.create", "line_number": 425, "usage_type": "call"}, {"api_name": "tests.factories.ExtensionFactory", "line_number": 425, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 427, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 427, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 428, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 428, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.create", "line_number": 429, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 429, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.elements", "line_number": 443, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", 
"line_number": 443, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 459, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 459, "usage_type": "name"}, {"api_name": "unittest.mock.call", "line_number": 460, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 460, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 441, "usage_type": "call"}, {"api_name": "xsdata.codegen.sanitizer.ClassSanitizer", "line_number": 441, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 441, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 441, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 465, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 465, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.list", "line_number": 465, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 465, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 471, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 471, "usage_type": "name"}, {"api_name": "tests.factories.AttrTypeFactory.xs_any", "line_number": 475, "usage_type": "call"}, {"api_name": "tests.factories.AttrTypeFactory", "line_number": 475, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 477, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 477, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 482, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 482, "usage_type": "name"}, {"api_name": "xsdata.codegen.models.Restrictions", "line_number": 489, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 497, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 497, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.list", "line_number": 497, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 497, "usage_type": "name"}, {"api_name": "tests.factories.ClassFactory.create", "line_number": 503, "usage_type": "call"}, {"api_name": "tests.factories.ClassFactory", "line_number": 503, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.list", "line_number": 503, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 503, "usage_type": "name"}, {"api_name": "tests.factories.AttrFactory.create", "line_number": 508, "usage_type": "call"}, {"api_name": "tests.factories.AttrFactory", "line_number": 508, "usage_type": "name"}, {"api_name": "xsdata.codegen.models.Restrictions", "line_number": 512, "usage_type": "call"}]} +{"seq_id": "425009572", "text": "# Mechanics IO\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom math import cos, sin, asin, atan2\nfrom scipy import constants\n\nimport numpy as np\nimport h5py\nimport bisect\nimport time\n\nimport tempfile\nfrom contextlib import contextmanager\n\n# Siconos imports\nimport siconos.numerics as Numerics\nfrom siconos.kernel import \\\n EqualityConditionNSL, \\\n Interaction, DynamicalSystem, TimeStepping\nimport siconos.kernel as Kernel\n\n# Siconos Mechanics imports\nfrom siconos.mechanics.collision.tools import Contactor, Volume\nfrom siconos.mechanics import joints\nfrom siconos.io.io_base import 
MechanicsIO\nfrom siconos.io.FrictionContactTrace import FrictionContactTrace\n\n# Currently we must choose between two implementations\nuse_original = False\nuse_proposed = True\n\n\ndef set_implementation(i):\n global use_original, use_proposed\n if i == 'original':\n use_original = have_original\n use_proposed = not have_original\n setup_default_classes()\n return use_original\n elif i == 'proposed':\n use_proposed = have_proposed\n use_original = not have_proposed\n setup_default_classes()\n return use_proposed\n return False\n\n# For 'proposed' implementation, it is necessary to select a back-end,\n# although currently only Bullet is supported for general objects.\nbackend = 'bullet'\n\n\ndef set_backend(b):\n global backend\n backend = b\n setup_default_classes()\n\nhave_proposed = False\nhave_original = False\nhave_bullet = False\nhave_occ = False\n\n# Imports for 'proposed' implementation\ntry:\n from siconos.mechanics.collision import BodyDS, \\\n SiconosSphere, SiconosBox, SiconosCylinder, SiconosPlane, \\\n SiconosConvexHull, SiconosContactor, SiconosContactorSet, \\\n SiconosMesh, SiconosHeightMap\n\n try:\n from siconos.mechanics.collision.bullet import \\\n SiconosBulletCollisionManager, SiconosBulletOptions\n have_bullet = True\n except:\n have_bullet = False\n\n have_proposed = True\nexcept:\n have_proposed = False\n use_proposed = False\n\n# Imports for 'original' implementation\ntry:\n from siconos.mechanics.collision.bullet import \\\n BulletDS, BulletWeightedShape, btScalarSize, \\\n btCollisionObject, BulletTimeStepping, BulletSpaceFilter\n\n from siconos.mechanics.collision.bullet import btVector3, \\\n btConvexHullShape, btCylinderShape, btBoxShape, btSphereShape, \\\n btConeShape, btCapsuleShape, btCompoundShape, btTriangleIndexVertexArray, \\\n btGImpactMeshShape\n have_bullet = True\n have_original = True\n\nexcept:\n have_original = False\n use_original = False\n\n# Shared Bullet imports\ntry:\n from siconos.mechanics.collision.bullet import \\\n btScalarSize, btQuaternion, btTransform, \\\n btVector3, quatRotate\n from siconos.mechanics.collision.bullet import \\\n __mul__ as mul\nexcept:\n have_bullet = False\n\n# OCC imports\ntry:\n from siconos.mechanics import occ\n have_occ = True\nexcept:\n have_occ = False\n\n### Configuration\n\ndefault_manager_class = None\ndefault_simulation_class = None\ndefault_body_class = None\nuse_bullet = False\n\ndef setup_default_classes():\n global default_manager_class\n global default_simulation_class\n global default_body_class\n global use_bullet\n if use_proposed:\n if backend == 'bullet':\n def m(model, options):\n if options is None:\n options = SiconosBulletOptions()\n return SiconosBulletCollisionManager(options)\n default_manager_class = m\n use_bullet = have_bullet\n default_simulation_class = TimeStepping\n default_body_class = BodyDS\n elif use_original:\n if backend == 'bullet':\n default_manager_class = lambda model,options: BulletSpaceFilter(model)\n default_simulation_class = BulletTimeStepping\n default_body_class = BulletDS\n use_bullet = have_bullet\n elif backend == 'occ':\n default_manager_class = lambda model,options: occ.OccSpaceFilter(model)\n default_simulation_class = occ.OccTimeStepping\n default_body_class = occ.OccBody\n use_bullet = have_bullet\n\nsetup_default_classes()\n\n### Utility functions\n\ndef floatv(v):\n return [float(x) for x in v]\n\n\ndef arguments():\n \"\"\"Returns tuple containing dictionary of calling function's\n named arguments and a list of calling function's unnamed\n 
positional arguments.\n \"\"\"\n from inspect import getargvalues, stack\n posname, kwname, args = getargvalues(stack()[1][0])[-3:]\n posargs = args.pop(posname, [])\n args.update(args.pop(kwname, []))\n return args, posargs\n\n\n@contextmanager\ndef tmpfile(suffix='', prefix='siconos_io', contents=None):\n \"\"\"\n A context manager for a named temporary file.\n \"\"\"\n (_, tfilename) = tempfile.mkstemp(suffix=suffix, prefix=prefix)\n fid = open(tfilename, 'w')\n if contents is not None:\n fid.write(contents)\n fid.flush()\n\n class TmpFile:\n\n def __init__(self, fid, name):\n self.fid = fid\n self.name = name\n\n def __getitem__(self, n):\n if n == 0:\n return self.fid\n elif n == 1:\n return self.name\n else:\n raise IndexError\n\n r = TmpFile(fid, tfilename)\n\n yield r\n fid.close()\n os.remove(tfilename)\n\n\nclass Timer():\n\n def __init__(self):\n self._t0 = time.clock()\n\n def elapsed(self):\n return time.clock() - self._t0\n\n def update(self):\n self._t0 = time.clock()\n\n\ndef warn(msg):\n sys.stderr.write('{0}: {1}'.format(sys.argv[0], msg))\n\n\ndef log(fun, with_timer=False):\n if with_timer:\n t = Timer()\n\n def logged(*args):\n t.update()\n print('{0} ...'.format(fun.__name__), end='')\n fun(*args)\n print('..... {0} s'.format(t.elapsed()))\n return logged\n else:\n def silent(*args):\n fun(*args)\n return silent\n\n\ndef object_id(obj):\n \"\"\"returns an unique object identifier\"\"\"\n return obj.__hash__()\n\n\ndef apply_gravity(body):\n g = constants.g\n weight = [0, 0, - body.scalarMass() * g]\n body.setFExtPtr(weight)\n\n\ndef group(h, name, must_exist=True):\n try:\n return h[name]\n except KeyError:\n if must_exist:\n return h.create_group(name)\n else:\n try:\n return h.create_group(name)\n except ValueError:\n # could not create group, return None\n # (file is probably in read-only mode)\n return None\n\ndef data(h, name, nbcolumns, use_compression=False):\n try:\n return h[name]\n except KeyError:\n comp = use_compression and nbcolumns > 0\n return h.create_dataset(name, (0, nbcolumns),\n maxshape=(None, nbcolumns),\n chunks=[None,(4000,nbcolumns)][comp],\n compression=[None,'gzip'][comp],\n compression_opts=[None,9][comp])\n\n\ndef add_line(dataset, line):\n dataset.resize(dataset.shape[0] + 1, 0)\n dataset[dataset.shape[0] - 1, :] = line\n\n\ndef str_of_file(filename):\n with open(filename, 'r') as f:\n return str(f.read())\n\n\nclass Quaternion():\n\n def __init__(self, *args):\n import vtk\n self._vtkmath = vtk.vtkMath()\n self._data = vtk.vtkQuaternion[float](*args)\n\n def __mul__(self, q):\n r = Quaternion()\n self._vtkmath.MultiplyQuaternion(self._data, q._data, r._data)\n return r\n\n def __getitem__(self, i):\n return self._data[i]\n\n def conjugate(self):\n r = Quaternion((self[0], self[1], self[2], self[3]))\n r._data.Conjugate()\n return r\n\n def rotate(self, v):\n pv = Quaternion((0, v[0], v[1], v[2]))\n rv = self * pv * self.conjugate()\n # assert(rv[0] == 0)\n return [rv[1], rv[2], rv[3]]\n\n def axisAngle(self):\n r = [0, 0, 0]\n a = self._data.GetRotationAngleAndAxis(r)\n return r, a\n\n\ndef phi(q0, q1, q2, q3):\n \"\"\"\n Euler angle phi from quaternion.\n \"\"\"\n return atan2(2*(q0*q1+q2*q3), 1-2*(q1*q1+q2*q2))\n\n\ndef theta(q0, q1, q2, q3):\n \"\"\"\n Euler angle theta from quaternion.\n \"\"\"\n return asin(2*(q0*q2-q3*q1))\n\n\ndef psi(q0, q1, q2, q3):\n \"\"\"\n Euler angle psi from quaternion.\n \"\"\"\n return atan2(2*(q0*q3+q1*q2), 1-2*(q2*q2+q3*q3))\n\n# vectorized versions\nphiv = np.vectorize(phi)\nthetav = 
np.vectorize(theta)\npsiv = np.vectorize(psi)\n\n\n#\n# load .vtp file\n#\ndef loadMesh(shape_filename, collision_margin, scale=None):\n    \"\"\"\n    loads a vtk .vtp file and returns a Bullet concave shape\n    WARNING triangles cells assumed!\n    \"\"\"\n\n    import vtk\n\n    reader = vtk.vtkXMLPolyDataReader()\n    reader.SetFileName(shape_filename)\n    reader.Update()\n\n    polydata = reader.GetOutput()\n    points = polydata.GetPoints().GetData()\n    num_points = points.GetNumberOfTuples()\n    num_triangles = polydata.GetNumberOfCells()\n\n    keep = None\n    shape = None\n\n    if polydata.GetCellType(0) == 5:\n        apoints = np.empty((num_points, 3), dtype={4:'f4',8:'f8'}[btScalarSize()])\n        for i in range(0, points.GetNumberOfTuples()):\n            p = points.GetTuple(i)\n            apoints[i, 0] = p[0]\n            apoints[i, 1] = p[1]\n            apoints[i, 2] = p[2]\n\n        if scale is not None:\n            apoints *= scale\n\n        aindices = np.empty((num_triangles, 3), dtype=np.int32)\n\n        for i in range(0, num_triangles):\n            c = polydata.GetCell(i)\n            aindices[i, 0] = c.GetPointIds().GetId(0)\n            aindices[i, 1] = c.GetPointIds().GetId(1)\n            aindices[i, 2] = c.GetPointIds().GetId(2)\n\n        tri = btTriangleIndexVertexArray(apoints, aindices)\n\n        shape = btGImpactMeshShape(tri)\n        shape.updateBound()\n\n        keep = tri, apoints, aindices\n\n    else: # assume convex shape\n        coors = dict()\n        for i in range(0, points.GetNumberOfTuples()):\n            coors[points.GetTuple(i)] = 1\n\n        shape = btConvexHullShape()\n        shape.setMargin(collision_margin)\n        for p in coors:\n            shape.addPoint(btVector3(*p))\n\n    return keep, shape\n\ndef loadSiconosMesh(shape_filename, scale=None):\n    \"\"\"\n    loads a vtk .vtp file and returns a SiconosMesh shape\n    WARNING triangles cells assumed!\n    \"\"\"\n    import vtk\n\n    reader = vtk.vtkXMLPolyDataReader()\n    reader.SetFileName(shape_filename)\n    reader.Update()\n\n    polydata = reader.GetOutput()\n    points = polydata.GetPoints().GetData()\n    num_points = points.GetNumberOfTuples()\n    num_triangles = polydata.GetNumberOfCells()\n\n    keep = None\n    shape = None\n\n    if polydata.GetCellType(0) == 5:\n        apoints = np.empty((num_points, 3), dtype={4:'f4',8:'f8'}[btScalarSize()])\n        for i in range(0, points.GetNumberOfTuples()):\n            p = points.GetTuple(i)\n            apoints[i, 0] = p[0]\n            apoints[i, 1] = p[1]\n            apoints[i, 2] = p[2]\n\n        if scale is not None:\n            apoints *= scale\n\n        aindices = np.empty(num_triangles*3, dtype=np.int32)\n\n        for i in range(0, num_triangles):\n            c = polydata.GetCell(i)\n            aindices[i*3 + 0] = c.GetPointIds().GetId(0)\n            aindices[i*3 + 1] = c.GetPointIds().GetId(1)\n            aindices[i*3 + 2] = c.GetPointIds().GetId(2)\n\n        shape = SiconosMesh(list(aindices), apoints)\n        dims = apoints.max(axis=0) - apoints.min(axis=0)\n\n    else: # assume convex shape\n        coors = dict()\n        for i in range(0, points.GetNumberOfTuples()):\n            coors[points.GetTuple(i)] = 1\n        coors = np.array(list(coors.keys()))\n        dims = coors.max(axis=0) - coors.min(axis=0)\n        shape = SiconosConvexHull(coors)\n\n    return shape, dims\n\nclass ShapeCollection():\n\n    \"\"\"\n    Instantiation of added contact shapes\n    \"\"\"\n\n    def __init__(self, io, collision_margin=0.04):\n        self._io = io\n        self._shapes = dict()\n        self._tri = dict()\n        self._collision_margin = collision_margin\n        # print('self._collision_margin',self._collision_margin)\n        if use_proposed:\n\n            self._primitive = {'Sphere': SiconosSphere,\n                               'Box': SiconosBox,\n                               'Cylinder': SiconosCylinder,\n                               'Plane': SiconosPlane}\n\n        elif use_original and use_bullet:\n\n            self._primitive = {'Cylinder': btCylinderShape,\n                               'Sphere': btSphereShape,\n                               'Box': btBoxShape,\n                               'Cone': btConeShape,\n                               'Compound': 
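# note: the 'proposed' map above exposes only Sphere/Box/Cylinder/Plane;\n                               # Cone, Capsule and Compound are only reachable via this original Bullet map\n                               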
btCompoundShape,\n 'Capsule': btCapsuleShape}\n else:\n self._primitive = dict()\n\n def shape(self, shape_name):\n return self._io.shapes()[shape_name]\n\n def attributes(self, shape_name):\n return self._io.shapes()[shape_name].attrs\n\n def url(self, shape_name):\n if 'url' in self.attributes(shape_name):\n shape_url = self.shape(shape_name).\\\n attrs['url']\n\n elif 'filename' in self.attributes(shape_name):\n shape_url = self.shape(shape_name).\\\n attrs['filename']\n\n else:\n shape_url = self.shape(shape_name)\n\n return shape_url\n\n def get(self, shape_name, shape_class=None, face_class=None,\n edge_class=None, new_instance=False):\n\n if new_instance or not shape_name in self._shapes:\n\n # load shape if it is an existing file\n if not isinstance(self.url(shape_name), str) and \\\n not 'primitive' in self.attributes(shape_name):\n # assume a vtp file (xml) stored in a string buffer\n\n if self.attributes(shape_name)['type'] == 'vtp':\n if self.shape(shape_name).dtype == h5py.new_vlen(str):\n with tmpfile() as tmpf:\n data = self.shape(shape_name)[:][0]\n tmpf[0].write(data)\n tmpf[0].flush()\n scale = None\n if 'scale' in self.attributes(shape_name):\n scale = self.attributes(shape_name)['scale']\n if use_proposed:\n mesh, dims = loadSiconosMesh(tmpf[1], scale=scale)\n self._shapes[shape_name] = mesh\n mesh.setInsideMargin(\n self.shape(shape_name).attrs.get('insideMargin',\n min(dims)*0.02))\n mesh.setOutsideMargin(\n self.shape(shape_name).attrs.get('outsideMargin',0))\n elif use_original:\n (self._tri[shape_name],\n self._shapes[shape_name]) = loadMesh(\n tmpf[1], self._collision_margin, scale=scale)\n else:\n assert False\n elif self.attributes(shape_name)['type'] in['step', 'stp']:\n from OCC.STEPControl import STEPControl_Reader\n from OCC.BRep import BRep_Builder\n from OCC.TopoDS import TopoDS_Compound\n from OCC.IFSelect import IFSelect_RetDone,\\\n IFSelect_ItemsByEntity\n\n builder = BRep_Builder()\n comp = TopoDS_Compound()\n builder.MakeCompound(comp)\n\n assert self.shape(shape_name).dtype == h5py.new_vlen(str)\n\n with tmpfile(contents=self.shape(shape_name)[:][0]) as tmpf:\n step_reader = STEPControl_Reader()\n\n status = step_reader.ReadFile(tmpf[1])\n\n if status == IFSelect_RetDone: # check status\n failsonly = False\n step_reader.PrintCheckLoad(\n failsonly, IFSelect_ItemsByEntity)\n step_reader.PrintCheckTransfer(\n failsonly, IFSelect_ItemsByEntity)\n\n ok = step_reader.TransferRoot(1)\n nbs = step_reader.NbShapes()\n\n for i in range(1, nbs + 1):\n shape = step_reader.Shape(i)\n builder.Add(comp, shape)\n\n self._shapes[shape_name] = comp\n self._io._keep.append(self._shapes[shape_name])\n\n elif self.attributes(shape_name)['type'] in['brep']:\n if not 'contact' in self.attributes(shape_name):\n\n # the reference brep\n if shape_class is None:\n brep_class = occ.OccContactShape\n else:\n brep_class = shape_class\n\n if 'occ_indx' in self.attributes(shape_name):\n\n from OCC.BRepTools import BRepTools_ShapeSet\n shape_set = BRepTools_ShapeSet()\n shape_set.ReadFromString(\n self.shape(shape_name)[:][0])\n the_shape = shape_set.Shape(shape_set.NbShapes())\n location = shape_set.Locations().Location(\n self.attributes(shape_name)['occ_indx'])\n the_shape.Location(location)\n brep = brep_class()\n brep.setData(the_shape)\n\n else:\n # raw brep\n brep = brep_class()\n brep.importBRepFromString(\n self.shape(shape_name)[:][0])\n\n self._shapes[shape_name] = brep\n self._io._keep.append(self._shapes[shape_name])\n\n else:\n # a contact on a brep\n assert 
'contact' in self.attributes(shape_name)\n                    assert 'contact_index' in self.attributes(shape_name)\n                    assert 'brep' in self.attributes(shape_name)\n                    contact_index = self.attributes(shape_name)['contact_index']\n\n                    if shape_class is None:\n                        brep_class = occ.OccContactShape\n                    else:\n                        brep_class = shape_class\n\n                    ref_brep = self.get(\n                        self.attributes(shape_name)['brep'], shape_class)\n\n                    if self.attributes(shape_name)['contact'] == 'Face':\n                        if face_class is None:\n                            face_maker = occ.OccContactFace\n                        else:\n                            face_maker = face_class\n\n                        self._shapes[shape_name] = \\\n                            face_maker(brep_class(ref_brep),\n                                       contact_index)\n\n                    elif self.attributes(shape_name)['contact'] == 'Edge':\n                        if edge_class is None:\n                            edge_maker = occ.OccContactEdge\n                        else:\n                            edge_maker = edge_class\n                        self._shapes[shape_name] = \\\n                            edge_maker(ref_brep,\n                                       contact_index)\n\n                    self._io._keep.append(self._shapes[shape_name])\n\n                elif self.attributes(shape_name)['type'] in ['heightmap']:\n\n                    if use_proposed:\n                        hm_data = self.shape(shape_name)\n                        r = hm_data.attrs['rect']\n                        assert(len(r)==2)\n                        hm = SiconosHeightMap(hm_data, r[0], r[1])\n                        dims = list(r) + [np.max(hm_data)-np.min(hm_data)]\n                        hm.setInsideMargin(\n                            hm_data.attrs.get('insideMargin', np.min(dims)*0.02))\n                        hm.setOutsideMargin(\n                            hm_data.attrs.get('outsideMargin', 0))\n\n                        self._shapes[shape_name] = hm\n                    else:\n                        raise NotImplementedError(\n                            'heightmap shapes require the proposed implementation')\n\n                elif self.attributes(shape_name)['type'] in ['convex']:\n                    # a convex point set\n                    if use_proposed:\n                        points = self.shape(shape_name)\n                        convex = SiconosConvexHull(points)\n                        dims = [points[:,0].max() - points[:,0].min(),\n                                points[:,1].max() - points[:,1].min(),\n                                points[:,2].max() - points[:,2].min()]\n                        convex.setInsideMargin(\n                            self.shape(shape_name).attrs.get('insideMargin',\n                            min(dims)*0.02))\n                        convex.setOutsideMargin(\n                            self.shape(shape_name).attrs.get('outsideMargin', 0))\n                    elif use_original and use_bullet:\n                        convex = btConvexHullShape()\n                        convex.setMargin(self._collision_margin)\n                        for points in self.shape(shape_name):\n                            convex.addPoint(btVector3(float(points[0]),\n                                                      float(points[1]),\n                                                      float(points[2])))\n                    else:\n                        raise NotImplementedError(\n                            'convex shapes require a Bullet-backed implementation')\n                    self._shapes[shape_name] = convex\n\n                else:\n                    raise NotImplementedError(\n                        'unknown shape type: %s' % self.attributes(shape_name)['type'])\n\n            elif isinstance(self.url(shape_name), str) and \\\n                    os.path.exists(self.url(shape_name)):\n                self._tri[shape_name], self._shapes[shape_name] = loadMesh(\n                    self.url(shape_name), self._collision_margin)\n            else:\n                # it must be a primitive with attributes\n                if isinstance(self.url(shape_name), str):\n                    name = self.url(shape_name)\n                    attrs = [float(x) for x in self.shape(shape_name)[0]]\n                else:\n                    name = self.attributes(shape_name)['primitive']\n                    attrs = [float(x) for x in self.shape(shape_name)[0]]\n                primitive = self._primitive[name]\n\n                if name in ['Box']:\n                    if use_proposed:\n                        box = primitive(attrs)\n                        self._shapes[shape_name] = box\n                        box.setInsideMargin(\n                            self.shape(shape_name).attrs.get('insideMargin',\n                                                             min(attrs)*0.02))\n                        box.setOutsideMargin(\n                            self.shape(shape_name).attrs.get('outsideMargin', 0))\n                    elif use_original and use_bullet:\n                        self._shapes[shape_name] = primitive(\n                            btVector3(attrs[0] / 2,\n                                      attrs[1] / 2,\n                                      attrs[2] / 2))\n\n                elif name in ['Cylinder'] and not use_proposed:\n                    self._shapes[shape_name] = primitive(btVector3(attrs[0],\n                                                                   attrs[1]/2,\n                                                                   attrs[0]))\n                # elif name in ['Compound']:\n                #     obj1 = attrs[0]\n                #     orig1 = attrs[1:4]\n                #     orie1 = attrs[4:8]\n                #     obj2 = attrs[8]\n                #     orig2 = attrs[9:12]\n                #     orie2 = attrs[12:16]\n                #     bcols = btCompoundShape()\n                #     bcols.addChildShape(...\n                else: # e.g. 
name in ['Sphere']:\n prim = self._shapes[shape_name] = primitive(*attrs)\n shp = self.shape(shape_name)\n if use_proposed:\n prim.setInsideMargin(\n shp.attrs.get('insideMargin', min(attrs)*0.02))\n prim.setOutsideMargin(shp.attrs.get('outsideMargin', 0))\n\n return self._shapes[shape_name]\n\n\nclass Hdf5():\n\n \"\"\"a Hdf5 context manager reads at instantiation the translations and\n orientations of collision objects from hdf5 file\n\n It provides functions to output translations and orientations in\n the same file during simulation (output is done by default in\n pos.dat)\n\n with:\n time : float\n object_id : the object id (int)\n px, py, pz : components of the translation (float)\n ow, ox, oy oz : components of an unit quaternion (float)\n\n \"\"\"\n\n def __init__(self, io_filename=None, mode='w',\n broadphase=None, model=None, osi=None, shape_filename=None,\n set_external_forces=None, gravity_scale=None, collision_margin=None,\n use_compression=False, output_domains=False):\n\n if io_filename is None:\n self._io_filename = '{0}.hdf5'.format(\n os.path.splitext(os.path.basename(sys.argv[0]))[0])\n else:\n self._io_filename = io_filename\n self._mode = mode\n self._broadphase = broadphase\n self._model = model\n self._osi = osi\n self._static = {}\n self._shape = None\n self._shapeid = dict()\n self._pinterid = dict()\n self._static_data = None\n self._velocities_data = None\n self._dynamic_data = None\n self._cf_data = None\n self._domain_data = None\n self._solv_data = None\n self._input = None\n self._nslaws_data = None\n self._nslaws = dict()\n self._out = None\n self._data = None\n self._ref = None\n self._permanent_interactions = None\n self._occ_contactors = dict()\n self._joints = None\n self._boundary_conditions = None\n self._io = MechanicsIO()\n self._set_external_forces = set_external_forces\n self._shape_filename = shape_filename\n self._number_of_shapes = 0\n self._number_of_permanent_interactions = 0\n self._number_of_dynamic_objects = 0\n self._number_of_static_objects = 0\n self._gravity_scale = gravity_scale\n self._collision_margin = collision_margin\n self._output_frequency = 1\n self._keep = []\n self._scheduled_births = []\n self._births = dict()\n self._initializing = True\n self._use_compression = use_compression\n self._should_output_domains = output_domains\n self._contact_index_set = 1\n\n def __enter__(self):\n if self._set_external_forces is None:\n self._set_external_forces = self.apply_gravity\n\n if self._gravity_scale is None:\n self._gravity_scale = 1 # 1 => m, 1/100. 
=> cm\n\n self._out = h5py.File(self._io_filename, self._mode)\n self._data = group(self._out, 'data')\n self._ref = group(self._data, 'ref')\n self._permanent_interactions = group(self._data, 'permanent_interactions',\n must_exist=False)\n self._joints = group(self._data, 'joints')\n try:\n self._boundary_conditions = group(self._data, 'boundary_conditions',\n must_exist=(self._mode=='w'))\n except Exception as e :\n print('Warning - group(self._data, boundary_conditions ) : ', e)\n self._static_data = data(self._data, 'static', 9,\n use_compression = self._use_compression)\n self._velocities_data = data(self._data, 'velocities', 8,\n use_compression = self._use_compression)\n self._dynamic_data = data(self._data, 'dynamic', 9,\n use_compression = self._use_compression)\n self._cf_data = data(self._data, 'cf', 15,\n use_compression = self._use_compression)\n if self._should_output_domains or 'domain' in self._data:\n self._domain_data = data(self._data, 'domain', 3,\n use_compression = self._use_compression)\n self._solv_data = data(self._data, 'solv', 4,\n use_compression = self._use_compression)\n self._input = group(self._data, 'input')\n self._nslaws_data = group(self._data, 'nslaws')\n\n if self._shape_filename is None:\n if self._collision_margin:\n self._shape = ShapeCollection(\n io=self, collision_margin=self._collision_margin)\n\n else:\n self._shape = ShapeCollection(io=self)\n else:\n if self._collision_margin:\n self._shape = ShapeCollection(\n io=self._shape_filename,\n collision_margin=self._collision_margin)\n else:\n self._shape = ShapeCollection(io=self._shape_filename)\n return self\n\n def __exit__(self, type_, value, traceback):\n self._out.close()\n\n def apply_gravity(self, body):\n g = constants.g / self._gravity_scale\n weight = [0, 0, - body.scalarMass() * g]\n body.setFExtPtr(weight)\n\n# hdf5 structure\n\n def shapes(self):\n \"\"\"\n Shapes : parameterized primitives or user defined\n (convex set or meshes)\n \"\"\"\n return self._ref\n\n def permanent_interactions(self):\n \"\"\"\n Permanent interactions.\n \"\"\"\n return self._permanent_interactions\n\n def static_data(self):\n \"\"\"\n Coordinates and orientations of static objects.\n \"\"\"\n return self._static_data\n\n def dynamic_data(self):\n \"\"\"\n Coordinates and orientations of dynamic objects.\n \"\"\"\n return self._dynamic_data\n\n def velocities_data(self):\n \"\"\"\n Velocities of dynamic objects\n \"\"\"\n return self._velocities_data\n\n def contact_forces_data(self):\n \"\"\"\n Contact points informations.\n \"\"\"\n return self._cf_data\n\n def domains_data(self):\n \"\"\"\n Contact point domain information.\n \"\"\"\n return self._domain_data\n\n def solver_data(self):\n \"\"\"\n Solver output\n \"\"\"\n return self._solv_data\n\n def instances(self):\n \"\"\"\n Scene objects.\n \"\"\"\n return self._input\n\n def nonsmooth_laws(self):\n \"\"\"\n Non smooth laws between group of contactors.\n \"\"\"\n return self._nslaws_data\n\n def joints(self):\n \"\"\"\n Joints between dynamic objects or between an object and the scenery.\n \"\"\"\n return self._joints\n\n def boundary_conditions(self):\n \"\"\"\n Boundary conditions applied to dynamic objects\n \"\"\"\n return self._boundary_conditions\n\n def importNonSmoothLaw(self, name):\n if self._broadphase is not None:\n nslawClass = getattr(Kernel, self._nslaws_data[name].attrs['type'])\n if nslawClass == Kernel.NewtonImpactFrictionNSL:\n nslaw = nslawClass(float(self._nslaws_data[name].attrs['e']), 0.,\n 
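# arguments follow NewtonImpactFrictionNSL(en, et, mu, size): the literal 0.\n                                       # is the tangential restitution and 3 the law size for 3D frictional\n                                       # contact (constructor order assumed from the kernel, not restated here)\n                                       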
float(self._nslaws_data[name].attrs['mu']), 3)\n elif nslawClass == Kernel.NewtonImpactNSL:\n nslaw = nslawClass(float(self._nslaws_data[name].attrs['e']))\n elif nslawClass == Kernel.RelayNSL:\n nslaw = nslawClass(int(self._nslaws_data[name].attrs['size']),\n float(self._nslaws_data[name].attrs['lb']),\n float(self._nslaws_data[name].attrs['ub']))\n assert(nslaw)\n self._nslaws[name] = nslaw\n gid1 = int(self._nslaws_data[name].attrs['gid1'])\n gid2 = int(self._nslaws_data[name].attrs['gid2'])\n if gid1 >= 0 and gid2 >= 0:\n if use_proposed:\n self._broadphase.insertNonSmoothLaw(nslaw, gid1, gid2)\n elif use_original:\n self._broadphase.insert(nslaw, gid1, gid2)\n\n def importOccObject(self, name, translation, orientation,\n velocity, contactors, mass, given_inertia, body_class,\n shape_class, face_class, edge_class, number=None):\n\n if mass == 0.:\n # a static object\n pass\n\n else:\n\n if body_class is None:\n body_class = occ.OccBody\n\n if given_inertia is not None:\n inertia = given_inertia\n else:\n from OCC.GProp import GProp_GProps\n from OCC.BRepGProp import brepgprop_VolumeProperties\n from OCC.gp import gp_Ax1, gp_Dir\n\n # compute mass and inertia from associated instances of Volume\n volumes = filter(lambda s: isinstance(s, Volume),\n contactors)\n\n props = GProp_GProps()\n\n for volume in volumes:\n\n iprops = GProp_GProps()\n ishape = occ.OccContactShape(\n self._shape.get(volume.data,\n shape_class, face_class,\n edge_class, new_instance=True)).data()\n\n # the shape relative displacement\n occ.occ_move(ishape, volume.translation)\n\n brepgprop_VolumeProperties(ishape, iprops)\n\n if volume.parameters is not None and \\\n hasattr(volume.parameters, 'density'):\n density = volume.parameters.density\n else:\n density = 1\n\n props.Add(iprops, density)\n\n # in props density=1\n global_density = mass / props.Mass()\n computed_com = props.Center_Of_Mass()\n\n # center of mass shift\n translation = np.subtract(translation,\n [computed_com.Coord(1),\n computed_com.Coord(2),\n computed_com.Coord(3)])\n\n I1 = global_density * props.MomentOfInertia(\n gp_Ax1(computed_com, gp_Dir(1, 0, 0)))\n I2 = global_density * props.MomentOfInertia(\n gp_Ax1(computed_com, gp_Dir(0, 1, 0)))\n I3 = global_density * props.MomentOfInertia(\n gp_Ax1(computed_com, gp_Dir(0, 0, 1)))\n\n # computed_inertia = density * props.MatrixOfInertia()\n inertia = [I1, I2, I3]\n\n body = body_class(\n translation + orientation, velocity, mass, inertia)\n\n if number is not None:\n body.setNumber(number)\n\n for rank, contactor in enumerate(contactors):\n\n contact_shape = None\n reference_shape = occ.OccContactShape(\n self._shape.get(contactor.data,\n shape_class, face_class,\n edge_class, new_instance=True))\n self._keep.append(reference_shape)\n\n if hasattr(contactor, 'contact_type'):\n\n if contactor.contact_type == 'Face':\n contact_shape = \\\n occ.OccContactFace(reference_shape,\n contactor.contact_index)\n\n elif contactor.contact_type == 'Edge':\n contact_shape = \\\n occ.OccContactEdge(reference_shape,\n contactor.contact_index)\n\n if contact_shape is not None:\n\n if name not in self._occ_contactors:\n self._occ_contactors[name] = dict()\n self._occ_contactors[name][contactor.instance_name] = rank\n\n body.addContactShape(contact_shape,\n contactor.translation,\n contactor.orientation,\n contactor.group)\n\n else:\n body.addShape(reference_shape.shape(),\n contactor.translation,\n contactor.orientation)\n\n self._set_external_forces(body)\n\n # add the dynamical system to the non smooth\n # 
dynamical system\n nsds = self._model.nonSmoothDynamicalSystem()\n nsds.insertDynamicalSystem(body)\n nsds.setName(body, str(name))\n\n def importBulletObject(self, name, translation, orientation,\n velocity, contactors, mass, inertia,\n body_class, shape_class, birth=False,\n number = None):\n\n\n if body_class is None:\n body_class = default_body_class\n\n if self._broadphase is not None and 'input' in self._data:\n body = None\n if use_proposed and mass == 0:\n # a static object\n\n cset = SiconosContactorSet()\n csetpos = (translation + orientation)\n for c in contactors:\n shp = self._shape.get(c.data)\n pos = list(c.translation) + list(c.orientation)\n cset.append(SiconosContactor(shp, pos, c.group))\n print('Adding shape %s to static contactor'%c.data, pos)\n self._broadphase.insertStaticContactorSet(cset, csetpos)\n\n self._static[name] = {\n 'number': number,\n 'origin': translation,\n 'orientation': orientation,\n 'transform': btTransform(btQuaternion(orientation[1],\n orientation[2],\n orientation[3],\n orientation[0]),\n btVector3(translation[0],\n translation[1],\n translation[2])),\n 'shape': shp,\n }\n\n elif use_original and mass == 0. and use_bullet:\n # a static object\n rbase = btQuaternion(orientation[1],\n orientation[2],\n orientation[3],\n orientation[0])\n\n tbase = btVector3(translation[0],\n translation[1],\n translation[2])\n\n for c in contactors:\n\n c_orientation = btQuaternion(c.orientation[1],\n c.orientation[2],\n c.orientation[3],\n c.orientation[0])\n\n c_origin = btVector3(c.translation[0],\n c.translation[1],\n c.translation[2])\n\n static_cobj = btCollisionObject()\n\n BulletDS.setRelativeTransform(static_cobj,\n tbase,\n rbase,\n c_origin,\n c_orientation)\n\n static_cobj.setCollisionFlags(\n btCollisionObject.CF_STATIC_OBJECT)\n\n static_cobj.setCollisionShape(\n self._shape.get(c.data))\n\n self._static[name] = {\n 'number': number,\n 'origin': static_cobj.getWorldTransform().getOrigin(),\n 'orientation': static_cobj.getWorldTransform().getRotation(),\n 'transform': static_cobj.getWorldTransform(),\n 'cobj': static_cobj,\n }\n\n self._broadphase.addStaticObject(static_cobj, int(c.group))\n\n elif use_proposed:\n # a proposed-API moving object\n\n if inertia is not None:\n if np.shape(inertia) == (3,):\n inertia = np.diag(inertia)\n elif np.shape(inertia) != (3,3):\n print('Wrong shape of inertia')\n have_inertia = True\n else:\n have_inertia = False\n\n body = body_class(translation + orientation,\n velocity,\n mass, inertia)\n if have_inertia:\n body.setUseContactorInertia(False)\n\n self_collide = self._input[name].get('allow_self_collide',None)\n if self_collide is not None:\n body.setAllowSelfCollide(not not self_collide)\n\n cset = SiconosContactorSet()\n for c in contactors:\n shp = self._shape.get(c.data)\n pos = list(c.translation) + list(c.orientation)\n cset.append(SiconosContactor(shp, pos, c.group))\n\n body.setContactors(cset)\n\n elif use_original and use_bullet:\n # a Bullet moving object\n bws = BulletWeightedShape(\n self._shape.get(contactors[0].data), mass)\n\n if inertia is not None:\n\n if np.shape(inertia) == (3,):\n bws.setInertia(inertia[0], inertia[1], inertia[2])\n elif (np.shape(inertia) == (3, 3)):\n bws.setInertia(inertia)\n else:\n print('Wrong shape of inertia')\n\n body = body_class(bws,\n translation + orientation,\n velocity,\n contactors[0].translation,\n contactors[0].orientation,\n contactors[0].group)\n\n #body.setNullifyFGyr(True)\n for contactor in contactors[1:]:\n shape_id = self._shapeid[contactor.data]\n\n 
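# shape_id is computed but unused here; each additional contactor is\n                    # attached to the body with its own relative translation and orientation\n                    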
body.addCollisionShape(self._shape.get(contactor.data),\n contactor.translation,\n contactor.orientation,\n contactor.group)\n\n if body:\n # set id number\n if number is not None:\n body.setNumber(number)\n\n # set external forces\n self._set_external_forces(body)\n\n # add the dynamical system to the non smooth\n # dynamical system\n if birth:\n nsds = self._model.nonSmoothDynamicalSystem()\n if use_proposed:\n nsds.insertDynamicalSystem(body)\n self._model.simulation().prepareIntegratorForDS(\n self._osi, body, self._model,\n self._model.simulation().nextTime())\n self._model.simulation().initialize(self._model, False)\n elif use_original:\n self._broadphase.addDynamicObject(\n body,\n self._model.simulation(),\n self._osi)\n nsds.setName(body, str(name))\n else:\n nsds = self._model.nonSmoothDynamicalSystem()\n nsds.insertDynamicalSystem(body)\n nsds.setName(body, str(name))\n\n def importJoint(self, name):\n if self._broadphase is not None:\n nsds = self._model.nonSmoothDynamicalSystem()\n topo = nsds.topology()\n\n joint_type = self.joints()[name].attrs['type']\n joint_class = getattr(joints, joint_type)\n absolute = self.joints()[name].attrs.get('absolute', None)\n absolute = [[True if absolute else False], []][absolute is None]\n allow_self_collide = self.joints()[name].attrs.get(\n 'allow_self_collide',None)\n stops = self.joints()[name].attrs.get('stops',None)\n nslaws = self.joints()[name].attrs.get('nslaws',None)\n friction = self.joints()[name].attrs.get('friction',None)\n\n ds1_name = self.joints()[name].attrs['object1']\n ds1 = topo.getDynamicalSystem(ds1_name)\n ds2 = None\n\n if 'object2' in self.joints()[name].attrs:\n ds2_name = self.joints()[name].attrs['object2']\n ds2 = topo.getDynamicalSystem(ds2_name)\n try:\n joint = joint_class(ds1, ds2,\n self.joints()[name].attrs['pivot_point'],\n self.joints()[name].attrs['axis'],\n *absolute)\n except NotImplementedError:\n try:\n joint = joint_class(ds1, ds2,\n self.joints()[name].attrs['pivot_point'],\n *absolute)\n except NotImplementedError:\n joint = joint_class(ds1, ds2, *absolute)\n\n else:\n try:\n joint = joint_class(ds1,\n self.joints()[name].attrs['pivot_point'],\n self.joints()[name].attrs['axis'],\n *absolute)\n except NotImplementedError:\n try:\n joint = joint_class(ds1,\n self.joints()[name].attrs['pivot_point'],\n *absolute)\n except NotImplementedError:\n joint = joint_class(ds1, *absolute)\n\n if allow_self_collide is not None:\n joint.setAllowSelfCollide(not not allow_self_collide)\n joint_nslaw = EqualityConditionNSL(joint.numberOfConstraints())\n joint_inter = Interaction(joint_nslaw, joint)\n self._model.nonSmoothDynamicalSystem().\\\n link(joint_inter, ds1, ds2)\n nsds.setName(joint_inter, str(name))\n\n # Add a e=0 joint by default, otherwise user can specify\n # the impact law by name or a list of names for each axis.\n if stops is not None:\n assert np.shape(stops)[1] == 3, 'Joint stops shape must be (?,3)'\n if nslaws is None:\n nslaws = [Kernel.NewtonImpactNSL(0.0)]*np.shape(stops)[0]\n elif isinstance(nslaws,str):\n nslaws = [self._nslaws[nslaws]]*np.shape(stops)[0]\n else:\n assert(np.shape(nslaws)[0]==np.shape(stops)[0])\n nslaws = [self._nslaws[nsl] for nsl in nslaws]\n for n, (nsl, (axis, pos, dir)) in enumerate(zip(nslaws,stops)):\n # \"bool()\" is needed because type of dir is\n # numpy.bool_, which SWIG doesn't handle well.\n stop = joints.JointStopR(joint, pos, bool(dir<0), int(axis))\n stop_inter = Interaction(nsl, stop)\n self._model.nonSmoothDynamicalSystem().\\\n link(stop_inter, ds1, 
ds2)\n nsds.setName(stop_inter, '%s_stop%d'%(str(name),n))\n\n if friction is not None:\n nslaw = self._nslaws[friction]\n fr = joints.JointFrictionR(joint, 0) #TODO axis list\n fr_inter = Interaction(nslaw, fr)\n self._model.nonSmoothDynamicalSystem().\\\n link(fr_inter, ds1, ds2)\n nsds.setName(fr_inter, str(name)+'_friction')\n\n def importBoundaryConditions(self, name):\n if self._broadphase is not None:\n topo = self._model.nonSmoothDynamicalSystem().\\\n topology()\n\n bc_type = self.boundary_conditions()[name].attrs['type']\n bc_class = getattr(Kernel,bc_type)\n\n print('name = ', name)\n print('object1')\n\n ds1_name = self.boundary_conditions()[name].attrs['object1']\n ds1 = topo.getDynamicalSystem(ds1_name)\n\n\n if ( bc_type == 'HarmonicBC') :\n bc = bc_class(self.boundary_conditions()[name].attrs['indices'],\n self.boundary_conditions()[name].attrs['a'],\n self.boundary_conditions()[name].attrs['b'],\n self.boundary_conditions()[name].attrs['omega'],\n self.boundary_conditions()[name].attrs['phi'])\n\n elif ( bc_type == 'FixedBC' ):\n bc = bc_class(self.boundary_conditions()[name].attrs['indices'])\n\n elif ( bc_type == 'BoundaryCondition' ):\n bc = bc_class(self.boundary_conditions()[name].attrs['indices'],\n self.boundary_conditions()[name].attrs['v'])\n\n # set bc to the ds1\n\n ds1.setBoundaryConditions(bc);\n\n #joint_inter = Interaction(joint_nslaw, joint)\n # self._model.nonSmoothDynamicalSystem().\\\n # link(joint_inter, ds1)\n\n def importPermanentInteraction(self, name):\n \"\"\"\n \"\"\"\n if (self._broadphase is not None and 'input' in self._data\n and self.permanent_interactions() is not None):\n topo = self._model.nonSmoothDynamicalSystem().\\\n topology()\n\n pinter = self.permanent_interactions()[name]\n body1_name=pinter.attrs['body1_name']\n body2_name=pinter.attrs['body2_name']\n\n ds1 = occ.cast_OccBody(topo.getDynamicalSystem(body1_name))\n try:\n ds2 = occ.cast_OccBody(topo.getDynamicalSystem(body2_name))\n except:\n ds2 = None\n\n contactor1_name = pinter.attrs['contactor1_name']\n contactor2_name = pinter.attrs['contactor2_name']\n\n distance_calculator = pinter.attrs['distance_calculator']\n offset = pinter.attrs['offset']\n\n body1 = self._input[body1_name]\n body2 = self._input[body2_name]\n\n ctr1 = body1[contactor1_name]\n ctr2 = body2[contactor2_name]\n\n cg1 = int(ctr1.attrs['group'])\n cg2 = int(ctr2.attrs['group'])\n nslaw = self._broadphase.nslaw(cg1, cg2)\n\n\n print (body1_name, self._occ_contactors[body1_name])\n cocs1_rank = self._occ_contactors[body1_name][contactor1_name]\n cocs1 = ds1.contactShape(cocs1_rank)\n\n if body2_name in self._occ_contactors:\n cocs2_rank = self._occ_contactors[body2_name][contactor2_name]\n cocs2 = ds2.contactShape(cocs2_rank)\n else:\n topods2 = self._shape.get(ctr2.attrs['name'])\n self._keep.append(topods2)\n ocs2 = occ.OccContactShape(topods2)\n\n index2 = int(ctr2.attrs['contact_index'])\n\n ctact_t2 = ctr2.attrs['type']\n\n ctactbuild = {'Face': occ.OccContactFace,\n 'Edge': occ.OccContactEdge}\n\n cocs2 = ctactbuild[ctact_t2](ocs2, index2)\n\n cp1 = occ.ContactPoint(cocs1)\n cp2 = occ.ContactPoint(cocs2)\n\n real_dist_calc = {'cadmbtb': occ.CadmbtbDistanceType,\n 'occ': occ.OccDistanceType}\n\n relation = occ.OccR(cp1, cp2,\n real_dist_calc[distance_calculator]())\n\n relation.setOffset(offset)\n\n inter = Interaction(nslaw, relation)\n\n if ds2 is not None:\n self._model.nonSmoothDynamicalSystem().link(inter, ds1, ds2)\n else:\n self._model.nonSmoothDynamicalSystem().link(inter, ds1)\n\n # keep 
pointers\n self._keep.append([cocs1, cocs2, cp1,\n cp2, relation])\n\n def importScene(self, time, body_class, shape_class, face_class,\n edge_class):\n \"\"\"\n From the specification given in the hdf5 file with the help of\n add* functions, import into the broadphase object:\n - the static objects\n - the dynamic objects\n - the joints\n - the nonsmooth laws\n that have a specified time of birth <= current time.\n \"\"\"\n\n # Ensure we count up from zero for implicit DS numbering\n DynamicalSystem.resetCount(0)\n\n for shape_name in self._ref:\n self._shapeid[shape_name] = self._ref[shape_name].attrs['id']\n self._number_of_shapes += 1\n\n # import dynamical systems\n if self._broadphase is not None and 'input' in self._data:\n\n dpos_data = self.dynamic_data()\n if dpos_data is not None and len(dpos_data) > 0:\n\n max_time = max(dpos_data[:, 0])\n id_last = np.where(\n abs(dpos_data[:, 0] - max_time) < 1e-9)[0]\n\n else:\n # should not be used\n max_time = None\n id_last = None\n\n for (name, obj) in sorted(self._input.items(),\n key=lambda x: x[0]):\n\n mass = obj.attrs['mass']\n time_of_birth = obj.attrs['time_of_birth']\n\n if time_of_birth >= time:\n #\n # in the future\n #\n bisect.insort_left(self._scheduled_births, time_of_birth)\n if time_of_birth in self._births:\n self._births[time_of_birth].append((name, obj))\n else:\n self._births[time_of_birth] = [(name, obj)]\n else:\n #\n # this is for now\n #\n # cold restart if output previously done\n if mass > 0 and dpos_data is not None and len(dpos_data) > 0:\n print ('Import dynamic object name ', name, 'from current state')\n print (' number of imported object ', obj.attrs['id'])\n\n id_last_inst = np.where(\n dpos_data[id_last, 1] ==\n self.instances()[name].attrs['id'])[0]\n xpos = dpos_data[id_last[id_last_inst[0]], :]\n translation = (xpos[2], xpos[3], xpos[4])\n orientation = (xpos[5], xpos[6], xpos[7], xpos[8])\n\n velocities = self.velocities_data()\n id_vlast = np.where(\n abs(velocities[:, 0] - max_time) < 1e-9)[0]\n\n id_vlast_inst = np.where(\n velocities[id_vlast, 1] ==\n self.instances()[name].attrs['id'])[0]\n xvel = velocities[id_vlast[id_vlast_inst[0]], :]\n velocity = (xvel[2], xvel[3], xvel[4], xvel[5], xvel[6], xvel[7])\n\n # start from initial conditions\n else:\n print ('Import dynamic or static object number ', obj.attrs['id'], 'from initial state')\n print (' object name ', name)\n translation = obj.attrs['translation']\n orientation = obj.attrs['orientation']\n velocity = obj.attrs['velocity']\n\n # bodyframe center of mass\n center_of_mass = floatv(obj.attrs.get('center_of_mass', [0,0,0]))\n\n input_ctrs = [ctr for _n_, ctr in obj.items()]\n\n contactors = []\n occ_type = False\n for ctr in input_ctrs:\n if 'type' in ctr.attrs:\n # occ contact\n occ_type = True\n contactors.append(\n Contactor(\n instance_name=ctr.attrs['instance_name'],\n shape_data=ctr.attrs['name'],\n collision_group=ctr.attrs['group'].astype(int),\n contact_type=ctr.attrs['type'],\n contact_index=ctr.attrs['contact_index'].astype(int),\n relative_translation=np.subtract(ctr.attrs['translation'].astype(float), center_of_mass),\n relative_orientation=ctr.attrs['orientation'].astype(float)))\n elif 'group' in ctr.attrs:\n # bullet contact\n assert not occ_type\n contactors.append(\n Contactor(\n instance_name=ctr.attrs['instance_name'],\n shape_data=ctr.attrs['name'],\n collision_group=ctr.attrs['group'].astype(int),\n relative_translation=np.subtract(ctr.attrs['translation'].astype(float), center_of_mass),\n 
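# contactor translations are shifted by center_of_mass so that offsets\n                            # are expressed in the body frame centered at the center of mass\n                            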
relative_orientation=ctr.attrs['orientation'].astype(float)))\n else:\n # occ shape\n occ_type = True\n # fix: not only contactors here\n contactors.append(\n Shape(\n instance_name=ctr.attrs['instance_name'],\n shape_data=ctr.attrs['name'],\n relative_translation=np.subtract(ctr.attrs['translation'].astype(float), center_of_mass),\n relative_orientation=ctr.attrs['orientation'].astype(float)))\n\n if 'inertia' in obj.attrs:\n inertia = obj.attrs['inertia']\n else:\n inertia = None\n\n if occ_type:\n # Occ object\n self.importOccObject(\n name, floatv(translation), floatv(orientation),\n floatv(velocity), contactors, float(mass),\n inertia, body_class, shape_class, face_class,\n edge_class,\n number = self.instances()[name].attrs['id'])\n else:\n # Bullet object\n self.importBulletObject(\n name, floatv(translation), floatv(orientation),\n floatv(velocity), contactors, float(mass),\n inertia, body_class, shape_class,\n number = self.instances()[name].attrs['id'])\n # import nslaws\n # note: no time of birth for nslaws and joints\n for name in self._nslaws_data:\n self.importNonSmoothLaw(name)\n\n for name in self.joints():\n self.importJoint(name)\n\n for name in self.boundary_conditions():\n self.importBoundaryConditions(name)\n\n for name in self.permanent_interactions():\n self.importPermanentInteraction(name)\n\n def currentTime(self):\n if self._initializing:\n return self._model.simulation().startingTime()\n else:\n return self._model.simulation().nextTime()\n\n def importBirths(self, body_class=None, shape_class=None,\n face_class=None, edge_class=None,):\n \"\"\"\n Import new objects in the broadphase.\n \"\"\"\n time = self.currentTime()\n\n ind_time = bisect.bisect_left(self._scheduled_births, time)\n\n current_times_of_births = set(self._scheduled_births[:ind_time])\n self._scheduled_births = self._scheduled_births[ind_time:]\n\n #print (time, current_times_of_births)\n for time_of_birth in current_times_of_births:\n #print( \"time_of_birth\", time_of_birth)\n for (name, obj) in self._births[time_of_birth]:\n #print(name,obj)\n translation = obj.attrs['translation']\n orientation = obj.attrs['orientation']\n velocity = obj.attrs['velocity']\n\n input_ctrs = [ctr for _n_, ctr in obj.items()]\n mass = obj.attrs['mass']\n\n contactors = [Contactor(\n instance_name=ctr.attrs['instance_name'],\n shape_data=ctr.attrs['name'],\n collision_group=int(ctr.attrs['group']),\n relative_translation=floatv(ctr.attrs['translation']),\n relative_orientation=floatv(ctr.attrs['orientation']))\n for ctr in input_ctrs]\n\n if 'inertia' in obj.attrs:\n inertia = obj.attrs['inertia']\n else:\n inertia = None\n\n if True in ('type' in self.shapes()[ctr.attrs['name']].attrs\n and self.shapes()[ctr.attrs['name']].attrs['type']\n in ['brep', 'step']\n for ctr in input_ctrs):\n # Occ object\n self.importOccObject(\n name, floatv(translation), floatv(orientation),\n floatv(\n velocity), contactors, float(mass),\n inertia, body_class, shape_class, face_class,\n edge_class, number = self.instances()[name].attrs['id'])\n else:\n # Bullet object\n self.importBulletObject(\n name, floatv(translation), floatv(orientation),\n floatv(velocity), contactors, float(mass),\n inertia, body_class, shape_class, birth=True,\n number = self.instances()[name].attrs['id'])\n\n def outputStaticObjects(self):\n \"\"\"\n Outputs translations and orientations of static objects\n \"\"\"\n time = self.currentTime()\n p = 0\n self._static_data.resize(len(self._static), 0)\n\n for static in self._static.values():\n print('output 
static object', static['number'])\n            translation = static['transform'].getOrigin()\n            rotation = static['transform'].getRotation()\n            self._static_data[p, :] = \\\n                [time,\n                 static['number'],\n                 translation.x(),\n                 translation.y(),\n                 translation.z(),\n                 rotation.w(),\n                 rotation.x(),\n                 rotation.y(),\n                 rotation.z()]\n            p += 1\n\n    def outputDynamicObjects(self, initial=False):\n        \"\"\"\n        Outputs translations and orientations of dynamic objects.\n        \"\"\"\n\n        current_line = self._dynamic_data.shape[0]\n\n        time = self.currentTime()\n\n        positions = self._io.positions(self._model)\n\n        if positions is not None:\n\n            self._dynamic_data.resize(current_line + positions.shape[0], 0)\n\n            times = np.empty((positions.shape[0], 1))\n            times.fill(time)\n\n            self._dynamic_data[current_line:, :] = np.concatenate((times,\n                                                                   positions),\n                                                                  axis=1)\n\n    def outputVelocities(self):\n        \"\"\"\n        Output velocities of dynamic objects\n        \"\"\"\n\n        # rows are appended starting from this dataset's own current length\n        current_line = self._velocities_data.shape[0]\n\n        time = self.currentTime()\n\n        velocities = self._io.velocities(self._model)\n\n        if velocities is not None:\n\n            self._velocities_data.resize(current_line + velocities.shape[0], 0)\n\n            times = np.empty((velocities.shape[0], 1))\n            times.fill(time)\n\n            self._velocities_data[current_line:, :] = np.concatenate((times,\n                                                                      velocities),\n                                                                     axis=1)\n\n    def outputContactForces(self):\n        \"\"\"\n        Outputs contact forces\n        _contact_index_set default value is 1.\n        \"\"\"\n        if self._model.nonSmoothDynamicalSystem().\\\n                topology().indexSetsSize() > 1:\n            time = self.currentTime()\n            contact_points = self._io.contactPoints(self._model,\n                                                    self._contact_index_set)\n\n            if contact_points is not None:\n\n                current_line = self._cf_data.shape[0]\n                self._cf_data.resize(current_line + contact_points.shape[0], 0)\n                times = np.empty((contact_points.shape[0], 1))\n                times.fill(time)\n\n                self._cf_data[current_line:, :] = \\\n                    np.concatenate((times,\n                                    contact_points),\n                                   axis=1)\n\n    def outputDomains(self):\n        \"\"\"\n        Outputs domains of contact points\n        \"\"\"\n        if self._model.nonSmoothDynamicalSystem().\\\n                topology().indexSetsSize() > 1:\n            time = self.currentTime()\n            domains = self._io.domains(self._model)\n\n            if domains is not None:\n\n                current_line = self._domain_data.shape[0]\n                self._domain_data.resize(current_line + domains.shape[0], 0)\n                times = np.empty((domains.shape[0], 1))\n                times.fill(time)\n\n                self._domain_data[current_line:, :] = \\\n                    np.concatenate((times, domains), axis=1)\n\n    def outputSolverInfos(self):\n        \"\"\"\n        Outputs solver #iterations & precision reached\n        \"\"\"\n\n        time = self.currentTime()\n        so = self._model.simulation().oneStepNSProblem(0).\\\n            numericsSolverOptions()\n\n        current_line = self._solv_data.shape[0]\n        self._solv_data.resize(current_line + 1, 0)\n        if so.solverId == Numerics.SICONOS_GENERIC_MECHANICAL_NSGS:\n            iterations = so.iparam[3]\n            precision = so.dparam[2]\n            local_precision = so.dparam[3]\n        elif so.solverId == Numerics.SICONOS_FRICTION_3D_NSGS:\n            iterations = so.iparam[Numerics.SICONOS_IPARAM_ITER_DONE]\n            precision = so.dparam[Numerics.SICONOS_DPARAM_RESIDU]\n            local_precision = 0.\n            # maybe wrong for others\n        else:\n            iterations = so.iparam[Numerics.SICONOS_IPARAM_ITER_DONE]\n            precision = so.dparam[Numerics.SICONOS_DPARAM_RESIDU]\n            local_precision = so.dparam[2]\n\n        self._solv_data[current_line, :] = [time, iterations, precision,\n                                            local_precision]\n\n    def printSolverInfos(self):\n        \"\"\"\n        Prints solver #iterations & precision reached\n        \"\"\"\n        time = self.currentTime()\n        so = self._model.simulation().oneStepNSProblem(0).\\\n            numericsSolverOptions()\n        if so.solverId == Numerics.SICONOS_GENERIC_MECHANICAL_NSGS:\n            iterations = so.iparam[3]\n            precision = so.dparam[2]\n            local_precision = so.dparam[3]\n        elif so.solverId == Numerics.SICONOS_FRICTION_3D_NSGS:\n            iterations = so.iparam[Numerics.SICONOS_IPARAM_ITER_DONE]\n            precision = so.dparam[Numerics.SICONOS_DPARAM_RESIDU]\n            local_precision = 0.\n            # maybe wrong for others\n        else:\n            iterations = so.iparam[Numerics.SICONOS_IPARAM_ITER_DONE]\n            precision = so.dparam[Numerics.SICONOS_DPARAM_RESIDU]\n            local_precision = so.dparam[2]\n\n        print('SolverInfos at time :', time,\n              'iterations= ', iterations,\n              'precision=', precision,\n              'local_precision=', local_precision)\n\n    def addMeshFromString(self, name, shape_data, scale=None,\n                          insideMargin=None, outsideMargin=None):\n        \"\"\"\n        Add a mesh shape from a string.\n        Accepted format : mesh encoded in VTK .vtp format\n        \"\"\"\n\n        import vtk\n\n        if name not in self._ref:\n\n            shape = self._ref.create_dataset(name, (1,),\n                                             dtype=h5py.new_vlen(str))\n            shape[:] = shape_data\n            shape.attrs['id'] = self._number_of_shapes\n            shape.attrs['type'] = 'vtp'\n            if scale is not None:\n                shape.attrs['scale'] = scale\n            if insideMargin is not None:\n                shape.attrs['insideMargin'] = insideMargin\n            if outsideMargin is not None:\n                shape.attrs['outsideMargin'] = outsideMargin\n            self._shapeid[name] = shape.attrs['id']\n            self._number_of_shapes += 1\n\n    def addMeshFromFile(self, name, filename, scale=None,\n                        insideMargin=None, outsideMargin=None):\n        \"\"\"\n        Add a mesh shape from a file.\n        Accepted format : .stl or mesh encoded in VTK .vtp format\n        \"\"\"\n\n        import vtk\n\n        if filename[0] != os.path.sep:\n            filename = os.path.join(os.path.split(os.path.abspath(sys.argv[0]))[0],\n                                    filename)\n        if name not in self._ref:\n\n            if os.path.splitext(filename)[-1][1:] == 'stl':\n                reader = vtk.vtkSTLReader()\n                reader.SetFileName(filename)\n                reader.Update()\n\n                if reader.GetErrorCode() != 0:\n                    print('vtkSTLReader error', reader.GetErrorCode())\n                    sys.exit(1)\n\n                with tmpfile() as tmpf:\n                    writer = vtk.vtkXMLPolyDataWriter()\n                    writer.SetInputData(reader.GetOutput())\n                    writer.SetFileName(tmpf[1])\n                    writer.Write()\n\n                    shape_data = str_of_file(tmpf[1])\n\n            else:\n                assert os.path.splitext(filename)[-1][1:] == 'vtp'\n                shape_data = str_of_file(filename)\n\n            self.addMeshFromString(name, shape_data, scale=scale,\n                                   insideMargin=insideMargin,\n                                   outsideMargin=outsideMargin)\n\n    def addHeightMap(self, name, heightmap, rectangle,\n                     insideMargin=None, outsideMargin=None):\n        \"\"\"\n        Add a heightmap represented as a SiconosMatrix\n        \"\"\"\n        assert(heightmap.shape[0] >= 2)\n        assert(heightmap.shape[1] >= 2)\n        if name not in self._ref:\n            shape = self._ref.create_dataset(name, data=heightmap)\n            shape.attrs['id'] = self._number_of_shapes\n            shape.attrs['type'] = 'heightmap'\n\n            # measurements of the heightfield, i.e. 
length of sides of\n # the rectangle where heightmap will be placed -- height\n # is represented by heightmap values\n assert(len(rectangle)==2)\n shape.attrs['rect'] = rectangle # tuple (length x, length y)\n\n if insideMargin is not None:\n shape.attrs['insideMargin'] = insideMargin\n if outsideMargin is not None:\n shape.attrs['outsideMargin'] = outsideMargin\n self._shapeid[name] = shape.attrs['id']\n self._number_of_shapes += 1\n\n def addBRepFromString(self, name, shape_data):\n \"\"\"\n Add a brep contained in a string.\n \"\"\"\n if name not in self._ref:\n shape = self._ref.create_dataset(name, (1,),\n dtype=h5py.new_vlen(str))\n if type(shape_data) == str:\n # raw str\n shape[:] = shape_data\n else:\n # __getstate__ as with pythonocc\n shape[:] = shape_data[0]\n shape.attrs['occ_indx'] = shape_data[1]\n\n shape.attrs['id'] = self._number_of_shapes\n shape.attrs['type'] = 'brep'\n\n self._shapeid[name] = shape.attrs['id']\n self._number_of_shapes += 1\n\n def addOccShape(self, name, occ_shape):\n \"\"\"\n Add an OpenCascade TopoDS_Shape.\n \"\"\"\n\n if name not in self._ref:\n\n from OCC.STEPControl import STEPControl_Writer, STEPControl_AsIs\n\n # step format is used for the storage.\n step_writer = STEPControl_Writer()\n\n step_writer.Transfer(occ_shape, STEPControl_AsIs)\n\n shape_data = None\n\n with tmpfile() as tmpf:\n\n status = step_writer.Write(tmpf[1])\n\n tmpf[0].flush()\n shape_data = str_of_file(tmpf[1])\n\n shape = self._ref.create_dataset(name, (1,),\n dtype=h5py.new_vlen(str))\n shape[:] = shape_data\n shape.attrs['id'] = self._number_of_shapes\n shape.attrs['type'] = 'step'\n self._shapeid[name] = shape.attrs['id']\n self._number_of_shapes += 1\n\n def addShapeDataFromFile(self, name, filename):\n \"\"\"\n Add shape data from a file.\n \"\"\"\n if name not in self._ref:\n shape = self._ref.create_dataset(name, (1,),\n dtype=h5py.new_vlen(str))\n shape[:] = str_of_file(filename)\n shape.attrs['id'] = self._number_of_shapes\n try:\n shape.attrs['type'] = os.path.splitext(filename)[1][1:]\n except:\n shape.attrs['type'] = 'unknown'\n\n self._shapeid[name] = shape.attrs['id']\n self._number_of_shapes += 1\n\n def addInteraction(self, name, body1_name, contactor1_name,\n body2_name, contactor2_name,\n distance_calculator='cadmbtb',\n offset=0.0001):\n \"\"\"\n Add permanent interactions between two objects contactors.\n \"\"\"\n if name not in self.permanent_interactions():\n pinter = self.permanent_interactions().\\\n create_dataset(name, (1,),\n dtype=h5py.new_vlen(str))\n pinter.attrs['id'] = self._number_of_permanent_interactions\n pinter.attrs['type'] = 'permanent_interaction'\n pinter.attrs['body1_name'] = body1_name\n pinter.attrs['body2_name'] = body2_name\n pinter.attrs['contactor1_name'] = contactor1_name\n pinter.attrs['contactor2_name'] = contactor2_name\n pinter.attrs['distance_calculator'] = distance_calculator\n pinter.attrs['offset'] = offset\n\n self._pinterid[name] = pinter.attrs['id']\n self._number_of_permanent_interactions += 1\n\n def addConvexShape(self, name, points,\n insideMargin=None, outsideMargin=None):\n \"\"\"\n Add a convex shape defined by a list of points.\n \"\"\"\n if name not in self._ref:\n shape=self._ref.create_dataset(name,\n (np.shape(points)[0],\n np.shape(points)[1]))\n if insideMargin is not None:\n shape.attrs['insideMargin'] = insideMargin\n if outsideMargin is not None:\n shape.attrs['outsideMargin'] = outsideMargin\n shape[:]=points[:]\n shape.attrs['type']='convex'\n shape.attrs['id']=self._number_of_shapes\n 
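# hedged usage sketch (illustrative names and values, not taken from this file):\n            #   points = np.array([[0., 0., 0.], [1., 0., 0.],\n            #                      [0., 1., 0.], [0., 0., 1.]])\n            #   io.addConvexShape('Tetrahedron', points)\n            # the point set is stored as an (n, 3) dataset tagged type='convex'\n            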
self._shapeid[name]=shape.attrs['id']\n self._number_of_shapes += 1\n\n def addPrimitiveShape(self, name, primitive, params,\n insideMargin=None, outsideMargin=None):\n \"\"\"\n Add a primitive shape.\n \"\"\"\n if name not in self._ref:\n shape=self._ref.create_dataset(name, (1, len(params)))\n shape.attrs['id']=self._number_of_shapes\n shape.attrs['type']='primitive'\n shape.attrs['primitive']=primitive\n if insideMargin is not None:\n shape.attrs['insideMargin'] = insideMargin\n if outsideMargin is not None:\n shape.attrs['outsideMargin'] = outsideMargin\n shape[:]=params\n self._shapeid[name]=shape.attrs['id']\n self._number_of_shapes += 1\n\n def addObject(self, name, shapes,\n translation,\n orientation=[1, 0, 0, 0],\n velocity=[0, 0, 0, 0, 0, 0],\n mass=0, center_of_mass=[0, 0, 0],\n inertia=None, time_of_birth=-1,\n allow_self_collide=False):\n \"\"\"Add an object with associated shapes as a list of Volume or\n Contactor objects. Contact detection and processing is\n defined by the Contactor objects. The Volume objects are used for\n the computation of inertia and center of mass if not provided.\n\n Each Contactor and Volume object may have a relative\n translation and a relative orientation expressed in the bodyframe\n coordinates.\n\n Parameters\n ----------\n name: string\n The name of the object.\n\n shapes: iterable\n The list of associated Contactor or Volume objects.\n\n translation: array_like of length 3\n Initial translation of the object (mandatory)\n\n velocity: array_like of length 6\n Initial velocity of the object.\n The components are those of the translation velocity along\n x, y and z axis and the rotation velocity around x, y and\n z axis. The default velocity is [0, 0, 0, 0, 0, 0].\n\n mass: float\n The mass of the object, if it is zero the object is defined as\n a static object involved only in contact detection.\n The default value is zero.\n\n center_of_mass: array_like of length 3\n The position of the center of mass expressed in the body frame\n coordinates.\n\n inertia: array_like of length 3 or 3x3 matrix.\n The principal moments of inertia (array of length 3) or\n a full 3x3 inertia matrix\n\n \"\"\"\n # print(arguments())\n if len(orientation) == 2:\n # axis + angle\n axis=orientation[0]\n assert len(axis) == 3\n angle=orientation[1]\n assert type(angle) is float\n n=sin(angle / 2.) 
/ np.linalg.norm(axis)\n\n ori=[cos(angle / 2.), axis[0] * n, axis[1] * n, axis[2] * n]\n else:\n assert(len(orientation)==4)\n # a given quaternion\n ori=orientation\n\n assert(len(translation)==3)\n\n if name not in self._input:\n\n obj=group(self._input, name)\n\n obj.attrs['time_of_birth']=time_of_birth\n\n obj.attrs['mass']=mass\n obj.attrs['translation']=translation\n obj.attrs['orientation']=ori\n obj.attrs['velocity']=velocity\n obj.attrs['center_of_mass']=center_of_mass\n if inertia is not None:\n obj.attrs['inertia']=inertia\n if allow_self_collide is not None:\n obj.attrs['allow_self_collide']=allow_self_collide\n\n contactors = shapes\n\n for num, ctor in enumerate(contactors):\n\n if ctor.instance_name is not None:\n # a specified name\n instance_name = ctor.instance_name\n else:\n # the default name for contactor\n instance_name = '{0}-{1}'.format(ctor.data, num)\n\n dat = data(obj, instance_name, 0,\n use_compression=self._use_compression)\n\n dat.attrs['instance_name'] = instance_name\n dat.attrs['name'] = ctor.data\n if hasattr(ctor, 'group'):\n dat.attrs['group'] = ctor.group\n\n if hasattr(ctor, 'parameters') and \\\n ctor.parameters is not None:\n dat.attrs['parameters'] = ctor.parameters\n\n if hasattr(ctor, 'contact_type') and \\\n ctor.contact_type is not None:\n dat.attrs['type'] = ctor.contact_type\n\n if hasattr(ctor, 'contact_index') and \\\n ctor.contact_index is not None:\n dat.attrs['contact_index'] = ctor.contact_index\n\n dat.attrs['translation'] = ctor.translation\n dat.attrs['orientation'] = ctor.orientation\n\n if mass == 0:\n obj.attrs['id']=- (self._number_of_static_objects + 1)\n self._number_of_static_objects += 1\n\n else:\n obj.attrs['id']=(self._number_of_dynamic_objects + 1)\n self._number_of_dynamic_objects += 1\n\n return obj.attrs['id']\n\n def addNewtonImpactFrictionNSL(self, name, mu, e=0, collision_group1=0,\n collision_group2=0):\n \"\"\"\n Add a nonsmooth law for contact between 2 groups.\n Only NewtonImpactFrictionNSL are supported.\n name is an user identifiant and must be unique,\n mu is the coefficient of friction,\n e is the coefficient of restitution on the contact normal,\n gid1 and gid2 define the group identifiants.\n\n \"\"\"\n if name not in self._nslaws_data:\n nslaw=self._nslaws_data.create_dataset(name, (0,))\n nslaw.attrs['type']='NewtonImpactFrictionNSL'\n nslaw.attrs['mu']=mu\n nslaw.attrs['e']=e\n nslaw.attrs['gid1']=collision_group1\n nslaw.attrs['gid2']=collision_group2\n\n # Note, default groups are -1 here, indicating not to add them to\n # the nslaw lookup table for contacts, since 1D impacts are\n # useless in this case. They are however useful for joint stops.\n def addNewtonImpactNSL(self, name, e=0, collision_group1=-1,\n collision_group2=-1):\n \"\"\"\n Add a nonsmooth law for contact between 2 groups.\n Only NewtonImpactNSL are supported.\n name is a user identifier and must be unique,\n e is the coefficient of restitution on the contact normal,\n gid1 and gid2 define the group identifiers.\n\n As opposed to addNewtonImpactFrictionNSL, the default groups are\n -1, making the NSL unassociated with point contacts. 
\n    # Note, default groups are -1 here, indicating not to add them to\n    # the nslaw lookup table for contacts, since 1D impacts are\n    # useless in this case. They are however useful for joint stops.\n    def addNewtonImpactNSL(self, name, e=0, collision_group1=-1,\n                           collision_group2=-1):\n
        \"\"\"\n        Add a nonsmooth law for contact between 2 groups.\n        Only NewtonImpactNSL is supported.\n        name is a user identifier and must be unique,\n        e is the coefficient of restitution on the contact normal,\n        gid1 and gid2 define the group identifiers.\n\n        As opposed to addNewtonImpactFrictionNSL, the default groups are\n        -1, making the NSL unassociated with point contacts. It can\n        be used for joint stops however.\n        \"\"\"\n
        if name not in self._nslaws_data:\n            nslaw=self._nslaws_data.create_dataset(name, (0,))\n            nslaw.attrs['type']='NewtonImpactNSL'\n            nslaw.attrs['e']=e\n            nslaw.attrs['gid1']=collision_group1\n            nslaw.attrs['gid2']=collision_group2\n\n
    # Note, default groups are -1 here, indicating not to add them to\n    # the nslaw lookup table for contacts, since 1D impacts are\n    # useless in this case. They are however useful for joint friction.\n    def addRelayNSL(self, name, lb, ub, size=1, collision_group1=-1,\n                    collision_group2=-1):\n
        \"\"\"\n        Add a nonsmooth relay law between 2 groups.\n        name is a user identifier and must be unique,\n        lb and ub are the lower and upper bounds of the relay,\n        size is the dimension of the law,\n        gid1 and gid2 define the group identifiers.\n\n        As opposed to addNewtonImpactFrictionNSL, the default groups are\n        -1, making the NSL unassociated with point contacts. It can\n        be used for joint friction however.\n        \"\"\"\n
        if name not in self._nslaws_data:\n            nslaw=self._nslaws_data.create_dataset(name, (0,))\n            nslaw.attrs['type']='RelayNSL'\n            nslaw.attrs['size']=size\n            nslaw.attrs['lb']=lb\n            nslaw.attrs['ub']=ub\n            nslaw.attrs['gid1']=collision_group1\n            nslaw.attrs['gid2']=collision_group2\n\n
    def addJoint(self, name, object1, object2=None, pivot_point=[0, 0, 0],\n                 axis=[0, 1, 0], joint_class='PivotJointR', absolute=None,\n                 allow_self_collide=None, nslaws=None, stops=None, friction=None):\n        \"\"\"\n        Add a joint between two objects.\n        \"\"\"\n
        if name not in self.joints():\n            joint=self.joints().create_dataset(name, (0,))\n            joint.attrs['object1']=object1\n            if object2 is not None:\n                joint.attrs['object2']=object2\n            joint.attrs['type']=joint_class\n            joint.attrs['pivot_point']=pivot_point\n            joint.attrs['axis']=axis\n
            if absolute in [True, False]:\n                joint.attrs['absolute']=absolute\n            if allow_self_collide in [True, False]:\n                joint.attrs['allow_self_collide']=allow_self_collide\n
            if nslaws is not None:\n                # either the name of one nslaw, or a list of names of the\n                # same length as stops\n                joint.attrs['nslaws'] = nslaws\n            if stops is not None:\n                # must be a table of [[axis, pos, dir]..]\n                joint.attrs['stops'] = stops\n            if friction is not None:\n                # must be an NSL name (e.g. RelayNSL)\n                joint.attrs['friction'] = friction\n
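\n    # Usage sketch for the joint API (hypothetical names; 'ball1' and\n    # 'ball2' would have been added with addObject, the NSL names with\n    # the add*NSL methods above):\n    #\n    #     io.addNewtonImpactNSL('stop_law', e=0.1)\n    #     io.addRelayNSL('friction_law', lb=-0.5, ub=0.5)\n    #     io.addJoint('hinge', 'ball1', 'ball2',\n    #                 pivot_point=[0, 0, 1], axis=[0, 1, 0],\n    #                 joint_class='PivotJointR',\n    #                 nslaws='stop_law',\n    #                 stops=[[0, 3.14, -1]],  # [axis, pos, dir]\n    #                 friction='friction_law')\n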
\n    def addBoundaryCondition(self, name, object1, indices=None, bc_class='HarmonicBC',\n                             v=None, a=None, b=None, omega=None, phi=None):\n        \"\"\"\n        Add a boundary condition to the object object1.\n\n        The supported boundary condition classes are HarmonicBC,\n        BoundaryCondition and FixedBC.\n        \"\"\"\n
        if name not in self.boundary_conditions():\n            boundary_condition=self.boundary_conditions().create_dataset(name, (0,))\n            boundary_condition.attrs['object1']=object1\n            boundary_condition.attrs['indices']=indices\n            boundary_condition.attrs['type']=bc_class\n
            if bc_class == 'HarmonicBC':\n                boundary_condition.attrs['a'] = a\n                boundary_condition.attrs['b'] = b\n                boundary_condition.attrs['omega'] = omega\n                boundary_condition.attrs['phi'] = phi\n            elif bc_class == 'BoundaryCondition':\n                boundary_condition.attrs['v'] = v\n            elif bc_class == 'FixedBC':\n                pass  # nothing to do\n            else:\n                raise NotImplementedError\n
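\n    # Usage sketch (hypothetical object name 'slider'; for HarmonicBC\n    # the attributes presumably prescribe a harmonic motion of the form\n    # a + b*cos(omega*t + phi) on the selected indices):\n    #\n    #     io.addBoundaryCondition('drive', 'slider', indices=[0],\n    #                             bc_class='HarmonicBC',\n    #                             a=0., b=1., omega=2., phi=0.)\n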
\n    def run(self,\n            with_timer=False,\n            time_stepping=None,\n            space_filter=None,\n            options=None,\n            body_class=None,\n            shape_class=None,\n            face_class=None,\n            edge_class=None,\n            controller=None,\n            gravity_scale=1.0,\n            t0=0,\n            T=10,\n            h=0.0005,\n            multipoints_iterations=None,\n            theta=0.50001,\n            Newton_options=Kernel.SICONOS_TS_NONLINEAR,\n            Newton_max_iter=20,\n            set_external_forces=None,\n            solver=Numerics.SICONOS_FRICTION_3D_NSGS,\n            itermax=100000,\n            tolerance=1e-8,\n            projection_itermax=20,\n            projection_tolerance=1e-8,\n            projection_tolerance_unilateral=1e-8,\n            numerics_verbose=False,\n            violation_verbose=False,\n            output_frequency=None,\n            friction_contact_trace=False,\n            friction_contact_trace_params=None,\n            contact_index_set=1,\n            osi=Kernel.MoreauJeanOSI):\n
        \"\"\"\n        Run a simulation from inputs in hdf5 file.\n        Parameters are:\n\n          with_timer : use a timer for log output (default False)\n          gravity_scale : gravity is multiplied by this factor.\n                          1. for meters (default).\n                          1./100 for centimeters.\n                          This parameter may be needed for small\n                          objects because of Bullet collision margin (0.04).\n\n          t0 : starting time (default 0)\n          T : end time (default 10)\n          h : timestep (default 0.0005)\n          multipoints_iterations : use bullet \"multipoint iterations\"\n           (default None)\n          theta : parameter for Moreau-Jean OSI (default 0.50001)\n          Newton_max_iter : maximum number of iterations for\n           integrator Newton loop (default 20)\n          set_external_forces : method for external forces\n           (default earth gravity)\n          solver : default Numerics.SICONOS_FRICTION_3D_NSGS\n          itermax : maximum number of iterations for the solver\n          tolerance : friction contact solver tolerance\n          numerics_verbose : set verbose mode in numerics\n          output_frequency : period, in time steps, of the hdf5 output\n          contact_index_set : index set from which contact point\n           information is retrieved.\n        \"\"\"\n
        print ('load siconos module ...')\n        from siconos.kernel import \\\n            Model, NonSmoothDynamicalSystem, OneStepNSProblem,\\\n            TimeDiscretisation, GenericMechanical, FrictionContact,\\\n            NewtonImpactFrictionNSL\n\n        from siconos.numerics import SICONOS_FRICTION_3D_ONECONTACT_NSN\n\n
        print ('setup model simulation ...')\n        if set_external_forces is not None:\n            self._set_external_forces=set_external_forces\n\n        if space_filter is None: space_filter = default_manager_class\n        if time_stepping is None: time_stepping = default_simulation_class\n\n        if output_frequency is not None:\n            self._output_frequency=output_frequency\n\n        if gravity_scale is not None:\n            self._gravity_scale=gravity_scale\n\n
        # cold restart\n        times=set()\n        if self.dynamic_data() is not None and len(self.dynamic_data()) > 0:\n            dpos_data=self.dynamic_data()\n            times=set(dpos_data[:, 0])\n            t0=float(max(times))\n\n
        # Time-related parameters for this simulation run\n        k0=1+int(t0/h)\n        k=k0\n        kT=k0+int((T-t0)/h)\n        if T > t0:\n            print('')\n            print('Simulation will run from {0:.4f} to {1:.4f}s, step {2} to step {3} (h={4}, times=[{5},{6}])'\n                  .format(t0, T, k0, kT, h,\n                          min(times) if len(times)>0 else '?',\n                          max(times) if len(times)>0 else '?'))\n            print('')\n        else:\n            print('Simulation time {0} >= T={1}, exiting.'.format(t0,T))\n            exit()\n\n
        # Model\n        #\n        self._model=Model(t0, T)\n        model=self._model\n\n        # (1) OneStepIntegrators\n        joints=list(self.joints())\n\n        self._osi=osi(theta)\n\n        # (2) Time discretisation --\n        timedisc=TimeDiscretisation(t0, h)\n\n
        fc_index=0\n        if (friction_contact_trace == False) :\n            if len(joints) > 0:\n                osnspb=GenericMechanical(SICONOS_FRICTION_3D_ONECONTACT_NSN)\n                fc_index=1\n            else:\n                osnspb=FrictionContact(3, solver)\n        else:\n            osnspb=FrictionContactTrace(3, solver, friction_contact_trace_params, model)\n\n        self._contact_index_set = contact_index_set\n\n
        # Global solver options\n        solverOptions = osnspb.numericsSolverOptions()\n        solverOptions.iparam[0]=itermax\n        # -- full error evaluation\n        #solverOptions.iparam[1]=Numerics.SICONOS_FRICTION_3D_NSGS_ERROR_EVALUATION_FULL\n        # -- adaptive error evaluation\n        #solverOptions.iparam[1]=Numerics.SICONOS_FRICTION_3D_NSGS_ERROR_EVALUATION_ADAPTIVE\n        #solverOptions.iparam[8]=1\n        # -- light error evaluation with full final\n        solverOptions.iparam[1] = Numerics.SICONOS_FRICTION_3D_NSGS_ERROR_EVALUATION_LIGHT\n        solverOptions.iparam[14] = Numerics.SICONOS_FRICTION_3D_NSGS_FILTER_LOCAL_SOLUTION_TRUE\n        solverOptions.dparam[0] = tolerance\n\n
        # Friction one-contact solver options\n        fcOptions = solverOptions.internalSolvers[fc_index]\n        fcOptions.solverId = Numerics.SICONOS_FRICTION_3D_ONECONTACT_NSN_GP_HYBRID\n        fcOptions.iparam[0] = 100  # local solver iterations\n
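\n        # Note on the two calls below: setMaxSize bounds the dimension of\n        # the assembled one-step problem, and setMStorageType selects the\n        # matrix storage scheme (1: the sparse block storage used here;\n        # 0: the dense storage required by the projection problem further\n        # down).\n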
        osnspb.setMaxSize(30000)\n        osnspb.setMStorageType(1)\n        osnspb.setNumericsVerboseMode(numerics_verbose)\n\n        # keep previous solution\n        osnspb.setKeepLambdaAndYState(True)\n\n
        # Respect run() parameter for multipoints_iterations for\n        # backwards compatibility, but this is overridden by\n        # SiconosBulletOptions if one is provided.\n        if multipoints_iterations is not None and options is None:\n            options = SiconosBulletOptions()\n            options.perturbationIterations = 3*multipoints_iterations\n            options.minimumPointsPerturbationThreshold = 3*multipoints_iterations\n        self._broadphase = space_filter(model, options)\n\n
        if use_original:\n            if multipoints_iterations:\n                if hasattr(self._broadphase, 'collisionConfiguration'):\n                    self._broadphase.collisionConfiguration().\\\n                        setConvexConvexMultipointIterations()\n                    self._broadphase.collisionConfiguration().\\\n                        setPlaneConvexMultipointIterations()\n                else:\n                    print(\"\"\"\n                    ConvexConvexMultipointIterations and PlaneConvexMultipointIterations are unset\n                    \"\"\")\n\n
        # (6) Simulation setup with (1) (2) (3) (4) (5)\n        if time_stepping == Kernel.TimeSteppingDirectProjection:\n            osnspb_pos=Kernel.MLCPProjectOnConstraints(Numerics.SICONOS_MLCP_ENUM, 1.0)\n            so_pos = osnspb.numericsSolverOptions()\n            so_pos.iparam[0]=itermax\n            so_pos.dparam[0]=tolerance\n            osnspb_pos.setMaxSize(30000)\n            osnspb_pos.setMStorageType(0)  # \"not yet implemented for sparse storage\"\n            osnspb_pos.setNumericsVerboseMode(numerics_verbose)\n            osnspb_pos.setKeepLambdaAndYState(True)\n            simulation=time_stepping(timedisc, self._osi, osnspb, osnspb_pos)\n            simulation.setProjectionMaxIteration(projection_itermax)\n            simulation.setConstraintTolUnilateral(projection_tolerance_unilateral)\n            simulation.setConstraintTol(projection_tolerance)\n        else:\n            simulation=time_stepping(timedisc)\n            simulation.insertIntegrator(self._osi)\n            simulation.insertNonSmoothProblem(osnspb)\n            if use_proposed:\n                simulation.insertInteractionManager(self._broadphase)\n\n
        simulation.setNewtonOptions(Newton_options)\n        simulation.setNewtonMaxIteration(Newton_max_iter)\n        simulation.setNewtonTolerance(1e-10)\n\n        print ('import scene ...')\n        self.importScene(t0, body_class, shape_class, face_class, edge_class)\n\n        if controller is not None:\n            controller.initialize(self)\n\n
        model.setSimulation(simulation)\n        model.initialize()\n        print ('first output static and dynamic objects ...')\n        self.outputStaticObjects()\n        self.outputDynamicObjects()\n\n        if self._should_output_domains:\n            log(self.outputDomains, with_timer)()\n\n
        # nsds=model.nonSmoothDynamicalSystem()\n        # nds= nsds.getNumberOfDS()\n        # for i in range(nds):\n        #     ds=nsds.dynamicalSystem(i)\n        #     ds.display()\n        #     raw_input()\n        print ('start simulation ...')\n        self._initializing=False\n        while simulation.hasNextEvent():\n\n            print ('step', k, '<', k0 + int((T - t0) / h))\n\n
            log(self.importBirths(body_class=body_class,\n                                  shape_class=shape_class,\n                                  face_class=face_class,\n                                  edge_class=edge_class))\n\n            if controller is not None:\n                controller.step()\n\n            if use_original:\n                log(self._broadphase.buildInteractions, with_timer)\\\n                    (model.currentTime())\n\n            if (friction_contact_trace == True) :\n                osnspb._stepcounter = k\n\n            log(simulation.computeOneStep, with_timer)()\n\n
            if (k % self._output_frequency == 0) or (k == 1):\n                print ('output in hdf5 file at step ', k)\n\n                log(self.outputDynamicObjects, with_timer)()\n\n                log(self.outputVelocities, with_timer)()\n\n                log(self.outputContactForces, with_timer)()\n\n                if self._should_output_domains:\n                    log(self.outputDomains, with_timer)()\n\n                
log(self.outputSolverInfos, with_timer)()\n\n log(self._out.flush)()\n\n\n if use_proposed:\n number_of_contacts = (\n self._broadphase.statistics().new_interactions_created\n + self._broadphase.statistics().existing_interactions_processed)\n elif use_original:\n number_of_contacts = (self._model.simulation()\n .oneStepNSProblem(0).getSizeOutput()//3)\n\n if number_of_contacts > 0 :\n print('number of contacts',self._model.simulation().oneStepNSProblem(0).getSizeOutput()//3)\n self.printSolverInfos()\n\n if violation_verbose and number_of_contacts > 0 :\n if len(simulation.y(0,0)) >0 :\n print('violation info')\n y=simulation.y(0,0)\n yplus= np.zeros((2,len(y)))\n yplus[0,:]=y\n y=np.min(yplus,axis=1)\n violation_max=np.max(-y)\n print(' violation max :',violation_max)\n if (violation_max >= self._collision_margin):\n print(' violation max is larger than the collision_margin')\n lam=simulation.lambda_(1,0)\n print(' lambda max :',np.max(lam))\n #print(' lambda : ',lam)\n #raw_input()\n\n\n if len(simulation.y(1,0)) >0 :\n v=simulation.y(1,0)\n vplus= np.zeros((2,len(v)))\n vplus[0,:]=v\n v=np.max(vplus,axis=1)\n print(' velocity max :',np.max(v))\n print(' velocity min :',np.min(v))\n # #print(simulation.output(1,0))\n\n\n log(simulation.nextStep, with_timer)()\n\n print ('')\n k += 1\n", "sub_path": "io/swig/io/mechanics_io.py", "file_name": "mechanics_io.py", "file_ext": "py", "file_size_in_byte": 100152, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "55", "api": [{"api_name": "siconos.mechanics.collision.bullet.SiconosBulletOptions", "line_number": 135, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.SiconosBulletCollisionManager", "line_number": 136, "usage_type": "call"}, {"api_name": "siconos.kernel.TimeStepping", "line_number": 139, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.BodyDS", "line_number": 140, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.bullet.BulletSpaceFilter", "line_number": 143, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.BulletTimeStepping", "line_number": 144, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.bullet.BulletDS", "line_number": 145, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccSpaceFilter", "line_number": 148, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 148, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccTimeStepping", "line_number": 149, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.occ", "line_number": 149, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccBody", "line_number": 150, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.occ", "line_number": 150, "usage_type": "name"}, {"api_name": "inspect.getargvalues", "line_number": 167, "usage_type": "call"}, {"api_name": "inspect.stack", "line_number": 167, "usage_type": "call"}, {"api_name": "tempfile.mkstemp", "line_number": 178, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 202, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 173, "usage_type": "name"}, {"api_name": "time.clock", "line_number": 208, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 211, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 214, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 218, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 218, "usage_type": 
"attribute"}, {"api_name": "sys.argv", "line_number": 218, "usage_type": "attribute"}, {"api_name": "scipy.constants.g", "line_number": 243, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 243, "usage_type": "name"}, {"api_name": "vtk.vtkMath", "line_number": 288, "usage_type": "call"}, {"api_name": "vtk.vtkQuaternion", "line_number": 289, "usage_type": "attribute"}, {"api_name": "math.atan2", "line_number": 320, "usage_type": "call"}, {"api_name": "math.asin", "line_number": 327, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 339, "usage_type": "call"}, {"api_name": "vtk.vtkXMLPolyDataReader", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 366, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btScalarSize", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 376, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.collision.bullet.btTriangleIndexVertexArray", "line_number": 384, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btGImpactMeshShape", "line_number": 386, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btConvexHullShape", "line_number": 396, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btVector3", "line_number": 399, "usage_type": "call"}, {"api_name": "vtk.vtkXMLPolyDataReader", "line_number": 410, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 423, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btScalarSize", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 433, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.collision.SiconosMesh", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 448, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.SiconosConvexHull", "line_number": 450, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.SiconosSphere", "line_number": 468, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.SiconosBox", "line_number": 469, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.SiconosCylinder", "line_number": 470, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.SiconosPlane", "line_number": 471, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.bullet.btCylinderShape", "line_number": 475, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.bullet.btSphereShape", "line_number": 476, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.bullet.btBoxShape", "line_number": 477, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.bullet.btConeShape", "line_number": 478, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.bullet.btCompoundShape", "line_number": 479, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.bullet.btCapsuleShape", "line_number": 480, "usage_type": "name"}, {"api_name": "h5py.new_vlen", "line_number": 515, "usage_type": "call"}, {"api_name": "OCC.BRep.BRep_Builder", "line_number": 544, 
"usage_type": "call"}, {"api_name": "OCC.TopoDS.TopoDS_Compound", "line_number": 545, "usage_type": "call"}, {"api_name": "h5py.new_vlen", "line_number": 548, "usage_type": "call"}, {"api_name": "OCC.STEPControl.STEPControl_Reader", "line_number": 551, "usage_type": "call"}, {"api_name": "OCC.IFSelect.IFSelect_RetDone", "line_number": 555, "usage_type": "name"}, {"api_name": "OCC.IFSelect.IFSelect_ItemsByEntity", "line_number": 558, "usage_type": "name"}, {"api_name": "OCC.IFSelect.IFSelect_ItemsByEntity", "line_number": 560, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccContactShape", "line_number": 577, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.occ", "line_number": 577, "usage_type": "name"}, {"api_name": "OCC.BRepTools.BRepTools_ShapeSet", "line_number": 584, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ.OccContactShape", "line_number": 611, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.occ", "line_number": 611, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccContactFace", "line_number": 620, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.occ", "line_number": 620, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccContactEdge", "line_number": 630, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.occ", "line_number": 630, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.SiconosHeightMap", "line_number": 645, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 646, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 646, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 648, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.SiconosConvexHull", "line_number": 660, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btConvexHullShape", "line_number": 670, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btVector3", "line_number": 673, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 684, "usage_type": "call"}, {"api_name": "os.path", "line_number": 684, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.collision.bullet.btVector3", "line_number": 708, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btVector3", "line_number": 713, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 760, "usage_type": "call"}, {"api_name": "os.path", "line_number": 760, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 760, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 760, "usage_type": "attribute"}, {"api_name": "siconos.io.io_base.MechanicsIO", "line_number": 787, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 812, "usage_type": "call"}, {"api_name": "{'STEPControl_Reader': 'OCC.STEPControl.STEPControl_Reader', 'BRep_Builder': 'OCC.BRep.BRep_Builder', 'TopoDS_Compound': 'OCC.TopoDS.TopoDS_Compound', 'IFSelect_RetDone': 'OCC.IFSelect.IFSelect_RetDone', 'IFSelect_ItemsByEntity': 'OCC.IFSelect.IFSelect_ItemsByEntity', 'BRepTools_ShapeSet': 'OCC.BRepTools.BRepTools_ShapeSet'}", "line_number": 841, "usage_type": "call"}, {"api_name": "{'STEPControl_Reader': 'OCC.STEPControl.STEPControl_Reader', 'BRep_Builder': 'OCC.BRep.BRep_Builder', 'TopoDS_Compound': 'OCC.TopoDS.TopoDS_Compound', 'IFSelect_RetDone': 'OCC.IFSelect.IFSelect_RetDone', 'IFSelect_ItemsByEntity': 'OCC.IFSelect.IFSelect_ItemsByEntity', 'BRepTools_ShapeSet': 'OCC.BRepTools.BRepTools_ShapeSet'}", 
"line_number": 845, "usage_type": "call"}, {"api_name": "{'STEPControl_Reader': 'OCC.STEPControl.STEPControl_Reader', 'BRep_Builder': 'OCC.BRep.BRep_Builder', 'TopoDS_Compound': 'OCC.TopoDS.TopoDS_Compound', 'IFSelect_RetDone': 'OCC.IFSelect.IFSelect_RetDone', 'IFSelect_ItemsByEntity': 'OCC.IFSelect.IFSelect_ItemsByEntity', 'BRepTools_ShapeSet': 'OCC.BRepTools.BRepTools_ShapeSet'}", "line_number": 848, "usage_type": "call"}, {"api_name": "{'STEPControl_Reader': 'OCC.STEPControl.STEPControl_Reader', 'BRep_Builder': 'OCC.BRep.BRep_Builder', 'TopoDS_Compound': 'OCC.TopoDS.TopoDS_Compound', 'IFSelect_RetDone': 'OCC.IFSelect.IFSelect_RetDone', 'IFSelect_ItemsByEntity': 'OCC.IFSelect.IFSelect_ItemsByEntity', 'BRepTools_ShapeSet': 'OCC.BRepTools.BRepTools_ShapeSet'}", "line_number": 852, "usage_type": "call"}, {"api_name": "scipy.constants.g", "line_number": 859, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 859, "usage_type": "name"}, {"api_name": "siconos.kernel", "line_number": 940, "usage_type": "argument"}, {"api_name": "siconos.kernel.NewtonImpactFrictionNSL", "line_number": 941, "usage_type": "attribute"}, {"api_name": "siconos.kernel", "line_number": 941, "usage_type": "name"}, {"api_name": "siconos.kernel.NewtonImpactNSL", "line_number": 944, "usage_type": "attribute"}, {"api_name": "siconos.kernel", "line_number": 944, "usage_type": "name"}, {"api_name": "siconos.kernel.RelayNSL", "line_number": 946, "usage_type": "attribute"}, {"api_name": "siconos.kernel", "line_number": 946, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccBody", "line_number": 971, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.occ", "line_number": 971, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.tools.Volume", "line_number": 981, "usage_type": "argument"}, {"api_name": "OCC.GProp.GProp_GProps", "line_number": 984, "usage_type": "call"}, {"api_name": "OCC.GProp.GProp_GProps", "line_number": 988, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ.OccContactShape", "line_number": 989, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 989, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.occ_move", "line_number": 995, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 995, "usage_type": "name"}, {"api_name": "OCC.BRepGProp.brepgprop_VolumeProperties", "line_number": 997, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 1012, "usage_type": "call"}, {"api_name": "OCC.gp.gp_Ax1", "line_number": 1018, "usage_type": "call"}, {"api_name": "OCC.gp.gp_Dir", "line_number": 1018, "usage_type": "call"}, {"api_name": "OCC.gp.gp_Ax1", "line_number": 1020, "usage_type": "call"}, {"api_name": "OCC.gp.gp_Dir", "line_number": 1020, "usage_type": "call"}, {"api_name": "OCC.gp.gp_Ax1", "line_number": 1022, "usage_type": "call"}, {"api_name": "OCC.gp.gp_Dir", "line_number": 1022, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ.OccContactShape", "line_number": 1036, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 1036, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccContactFace", "line_number": 1046, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 1046, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccContactEdge", "line_number": 1051, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 1051, "usage_type": "name"}, {"api_name": 
"siconos.mechanics.collision.SiconosContactorSet", "line_number": 1092, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.SiconosContactor", "line_number": 1097, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btTransform", "line_number": 1105, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btQuaternion", "line_number": 1105, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btVector3", "line_number": 1109, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btQuaternion", "line_number": 1117, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btVector3", "line_number": 1122, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btQuaternion", "line_number": 1128, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btVector3", "line_number": 1133, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.btCollisionObject", "line_number": 1137, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.BulletDS.setRelativeTransform", "line_number": 1139, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.BulletDS", "line_number": 1139, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.bullet.btCollisionObject.CF_STATIC_OBJECT", "line_number": 1146, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.collision.bullet.btCollisionObject", "line_number": 1146, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 1165, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 1166, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1167, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.SiconosContactorSet", "line_number": 1183, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.SiconosContactor", "line_number": 1187, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.bullet.BulletWeightedShape", "line_number": 1193, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1198, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1200, "usage_type": "call"}, {"api_name": "siconos.mechanics.joints", "line_number": 1256, "usage_type": "argument"}, {"api_name": "siconos.kernel.EqualityConditionNSL", "line_number": 1301, "usage_type": "call"}, {"api_name": "siconos.kernel.Interaction", "line_number": 1302, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1310, "usage_type": "call"}, {"api_name": "siconos.kernel.NewtonImpactNSL", "line_number": 1312, "usage_type": "call"}, {"api_name": "siconos.kernel", "line_number": 1312, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 1312, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1314, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1316, "usage_type": "call"}, {"api_name": "siconos.mechanics.joints.JointStopR", "line_number": 1321, "usage_type": "call"}, {"api_name": "siconos.mechanics.joints", "line_number": 1321, "usage_type": "name"}, {"api_name": "siconos.kernel.Interaction", "line_number": 1322, "usage_type": "call"}, {"api_name": "siconos.mechanics.joints.JointFrictionR", "line_number": 1329, "usage_type": "call"}, {"api_name": "siconos.mechanics.joints", "line_number": 1329, "usage_type": "name"}, {"api_name": "siconos.kernel.Interaction", "line_number": 1330, "usage_type": "call"}, {"api_name": "siconos.kernel", "line_number": 1341, "usage_type": 
"argument"}, {"api_name": "siconos.mechanics.occ.cast_OccBody", "line_number": 1384, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 1384, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.cast_OccBody", "line_number": 1386, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 1386, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccContactShape", "line_number": 1417, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 1417, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccContactFace", "line_number": 1423, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.occ", "line_number": 1423, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccContactEdge", "line_number": 1424, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.occ", "line_number": 1424, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.ContactPoint", "line_number": 1428, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 1428, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.ContactPoint", "line_number": 1429, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 1429, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.CadmbtbDistanceType", "line_number": 1431, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.occ", "line_number": 1431, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccDistanceType", "line_number": 1432, "usage_type": "attribute"}, {"api_name": "siconos.mechanics.occ", "line_number": 1432, "usage_type": "name"}, {"api_name": "siconos.mechanics.occ.OccR", "line_number": 1434, "usage_type": "call"}, {"api_name": "siconos.mechanics.occ", "line_number": 1434, "usage_type": "name"}, {"api_name": "siconos.kernel.Interaction", "line_number": 1439, "usage_type": "call"}, {"api_name": "siconos.kernel.DynamicalSystem.resetCount", "line_number": 1463, "usage_type": "call"}, {"api_name": "siconos.kernel.DynamicalSystem", "line_number": 1463, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 1476, "usage_type": "call"}, {"api_name": "bisect.insort_left", "line_number": 1494, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1508, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1516, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1519, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.tools.Contactor", "line_number": 1545, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 1551, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.tools.Contactor", "line_number": 1557, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 1561, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 1571, "usage_type": "call"}, {"api_name": "bisect.bisect_left", "line_number": 1621, "usage_type": "call"}, {"api_name": "siconos.mechanics.collision.tools.Contactor", "line_number": 1638, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 1709, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 1712, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 1731, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 1734, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 1753, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 1757, "usage_type": "call"}, {"api_name": "numpy.empty", 
"line_number": 1774, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 1778, "usage_type": "call"}, {"api_name": "siconos.numerics.SICONOS_GENERIC_MECHANICAL_NSGS", "line_number": 1791, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1791, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_FRICTION_3D_NSGS", "line_number": 1795, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1795, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_IPARAM_ITER_DONE", "line_number": 1796, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1796, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_DPARAM_RESIDU", "line_number": 1797, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1797, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_IPARAM_ITER_DONE", "line_number": 1801, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1801, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_DPARAM_RESIDU", "line_number": 1802, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1802, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_GENERIC_MECHANICAL_NSGS", "line_number": 1815, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1815, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_FRICTION_3D_NSGS", "line_number": 1819, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1819, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_IPARAM_ITER_DONE", "line_number": 1820, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1820, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_DPARAM_RESIDU", "line_number": 1821, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1821, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_IPARAM_ITER_DONE", "line_number": 1825, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1825, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_DPARAM_RESIDU", "line_number": 1826, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 1826, "usage_type": "name"}, {"api_name": "h5py.new_vlen", "line_number": 1847, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1869, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1870, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1870, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 1870, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 1870, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 1870, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 1874, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1874, "usage_type": "attribute"}, {"api_name": "vtk.vtkSTLReader", "line_number": 1875, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 1881, "usage_type": "call"}, {"api_name": "vtk.vtkXMLPolyDataWriter", "line_number": 1884, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 1892, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1892, "usage_type": "attribute"}, {"api_name": "h5py.new_vlen", "line_number": 1930, "usage_type": "call"}, {"api_name": "OCC.STEPControl.STEPControl_Writer", "line_number": 1955, "usage_type": "call"}, 
{"api_name": "OCC.STEPControl.STEPControl_AsIs", "line_number": 1957, "usage_type": "name"}, {"api_name": "h5py.new_vlen", "line_number": 1969, "usage_type": "call"}, {"api_name": "h5py.new_vlen", "line_number": 1982, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 1986, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1986, "usage_type": "attribute"}, {"api_name": "h5py.new_vlen", "line_number": 2003, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 2023, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 2024, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 2107, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 2107, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 2107, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 2109, "usage_type": "call"}, {"api_name": "siconos.kernel.SICONOS_TS_NONLINEAR", "line_number": 2311, "usage_type": "attribute"}, {"api_name": "siconos.kernel", "line_number": 2311, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_FRICTION_3D_NSGS", "line_number": 2314, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 2314, "usage_type": "name"}, {"api_name": "siconos.kernel.MoreauJeanOSI", "line_number": 2326, "usage_type": "attribute"}, {"api_name": "siconos.kernel", "line_number": 2326, "usage_type": "name"}, {"api_name": "siconos.kernel.Model", "line_number": 2399, "usage_type": "call"}, {"api_name": "siconos.mechanics.joints", "line_number": 2403, "usage_type": "name"}, {"api_name": "siconos.kernel.TimeDiscretisation", "line_number": 2408, "usage_type": "call"}, {"api_name": "siconos.mechanics.joints", "line_number": 2412, "usage_type": "argument"}, {"api_name": "siconos.kernel.GenericMechanical", "line_number": 2413, "usage_type": "call"}, {"api_name": "siconos.numerics.SICONOS_FRICTION_3D_ONECONTACT_NSN", "line_number": 2413, "usage_type": "name"}, {"api_name": "siconos.kernel.FrictionContact", "line_number": 2416, "usage_type": "call"}, {"api_name": "siconos.io.FrictionContactTrace.FrictionContactTrace", "line_number": 2418, "usage_type": "call"}, {"api_name": "siconos.numerics.SICONOS_FRICTION_3D_NSGS_ERROR_EVALUATION_LIGHT", "line_number": 2431, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 2431, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_FRICTION_3D_NSGS_FILTER_LOCAL_SOLUTION_TRUE", "line_number": 2432, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 2432, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_FRICTION_3D_ONECONTACT_NSN_GP_HYBRID", "line_number": 2437, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 2437, "usage_type": "name"}, {"api_name": "siconos.mechanics.collision.bullet.SiconosBulletOptions", "line_number": 2451, "usage_type": "call"}, {"api_name": "siconos.kernel.TimeSteppingDirectProjection", "line_number": 2469, "usage_type": "attribute"}, {"api_name": "siconos.kernel", "line_number": 2469, "usage_type": "name"}, {"api_name": "siconos.kernel.MLCPProjectOnConstraints", "line_number": 2470, "usage_type": "call"}, {"api_name": "siconos.kernel", "line_number": 2470, "usage_type": "name"}, {"api_name": "siconos.numerics.SICONOS_MLCP_ENUM", "line_number": 2470, "usage_type": "attribute"}, {"api_name": "siconos.numerics", "line_number": 2470, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 2570, "usage_type": "call"}, {"api_name": 
"numpy.min", "line_number": 2572, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 2573, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 2578, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2585, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 2587, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 2588, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 2589, "usage_type": "call"}]}